diff --git a/.cproject b/.cproject
index 243637d65..cfdc18ef9 100644
--- a/.cproject
+++ b/.cproject
@@ -311,14 +311,6 @@
true
true
-
- make
- -j2
- testGaussianFactor.run
- true
- true
- true
-
make
-j2
@@ -345,6 +337,7 @@
make
+
tests/testBayesTree.run
true
false
@@ -352,6 +345,7 @@
make
+
testBinaryBayesNet.run
true
false
@@ -399,6 +393,7 @@
make
+
testSymbolicBayesNet.run
true
false
@@ -406,6 +401,7 @@
make
+
tests/testSymbolicFactor.run
true
false
@@ -413,6 +409,7 @@
make
+
testSymbolicFactorGraph.run
true
false
@@ -428,11 +425,20 @@
make
+
tests/testBayesTree
true
false
true
+
+ make
+ -j2
+ testGaussianFactor.run
+ true
+ true
+ true
+
make
-j2
@@ -459,7 +465,6 @@
make
-
testGraph.run
true
false
@@ -531,7 +536,6 @@
make
-
testInference.run
true
false
@@ -539,7 +543,6 @@
make
-
testGaussianFactor.run
true
false
@@ -547,7 +550,6 @@
make
-
testJunctionTree.run
true
false
@@ -555,7 +557,6 @@
make
-
testSymbolicBayesNet.run
true
false
@@ -563,7 +564,6 @@
make
-
testSymbolicFactorGraph.run
true
false
@@ -633,22 +633,6 @@
false
true
-
- make
- -j2
- all
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
make
-j2
@@ -665,6 +649,22 @@
true
true
+
+ make
+ -j2
+ all
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
make
-j2
@@ -689,18 +689,26 @@
true
true
-
+
make
- -j2 VERBOSE=1
- check.nonlinear
+ -j2
+ all
true
- false
+ true
true
-
+
make
- -j5
- timing.nonlinear
+ -j2
+ check
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
true
true
true
@@ -753,26 +761,34 @@
true
true
-
+
make
- -j2
- all
+ -j2 VERBOSE=1
+ check.nonlinear
+ true
+ false
+ true
+
+
+ make
+ -j5
+ timing.nonlinear
true
true
true
-
+
make
- -j2
- check
+ -j5
+ nonlinear.testValues.run
true
true
true
-
+
make
- -j2
- clean
+ -j5
+ nonlinear.testOrdering.run
true
true
true
@@ -1123,6 +1139,7 @@
make
+
testErrors.run
true
false
@@ -1602,7 +1619,6 @@
make
-
testSimulated2DOriented.run
true
false
@@ -1642,7 +1658,6 @@
make
-
testSimulated2D.run
true
false
@@ -1650,7 +1665,6 @@
make
-
testSimulated3D.run
true
false
@@ -1744,10 +1758,10 @@
true
true
-
+
make
- -j2
- tests/testNoiseModel.run
+ -j5
+ linear.testNoiseModel.run
true
true
true
@@ -1824,6 +1838,14 @@
true
true
+
+ make
+ -j5
+ linear.testSerializationLinear.run
+ true
+ true
+ true
+
make
-j2
@@ -1914,7 +1936,6 @@
make
-
tests/testGaussianISAM2
true
false
@@ -1936,6 +1957,93 @@
true
true
+
+ make
+ -j2
+ install
+ true
+ true
+ true
+
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
+
+ make
+ -j5
+ check
+ true
+ true
+ true
+
+
+ make
+ -j5
+ all
+ true
+ true
+ true
+
+
+ cmake
+ ..
+ true
+ false
+ true
+
+
+ make
+ -j5
+ gtsam-shared
+ true
+ true
+ true
+
+
+ make
+ -j5
+ gtsam-static
+ true
+ true
+ true
+
+
+ make
+ -j5
+ timing
+ true
+ true
+ true
+
+
+ make
+ -j5
+ examples
+ true
+ true
+ true
+
+
+ make
+ -j5
+ VERBOSE=1 all
+ true
+ true
+ true
+
+
+ make
+ -j5
+ VERBOSE=1 check
+ true
+ true
+ true
+
make
-j2
@@ -2032,23 +2140,7 @@
true
true
-
- make
- -j2
- install
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
+
make
-j2
check
@@ -2056,7 +2148,23 @@
true
true
-
+
+ make
+ -j2
+ clean
+ true
+ true
+ true
+
+
+ make
+ -j2
+ install
+ true
+ true
+ true
+
+
make
-j2
all
@@ -2064,46 +2172,13 @@
true
true
-
+
cmake
..
-
true
false
true
-
- make
- -j5
- gtsam-shared
- true
- true
- true
-
-
- make
- -j5
- gtsam-static
- true
- true
- true
-
-
- make
- -j5
- timing
- true
- true
- true
-
-
- make
- -j5
- examples
- true
- true
- true
-
make
-j5
@@ -2144,46 +2219,6 @@
true
true
-
- make
- -j2
- check
- true
- true
- true
-
-
- make
- -j2
- clean
- true
- true
- true
-
-
- make
- -j2
- install
- true
- true
- true
-
-
- make
- -j2
- all
- true
- true
- true
-
-
- cmake
- ..
-
- true
- false
- true
-
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b8e560e7a..1067692dd 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -2,37 +2,58 @@ project(GTSAM CXX C)
cmake_minimum_required(VERSION 2.6)
# Set the version number for the library
-set (GTSAM_VERSION_MAJOR 0)
+set (GTSAM_VERSION_MAJOR 1)
set (GTSAM_VERSION_MINOR 9)
-set (GTSAM_VERSION_PATCH 3)
+set (GTSAM_VERSION_PATCH 0)
# Set the default install path to home
set (CMAKE_INSTALL_PREFIX ${HOME} CACHE DOCSTRING "Install prefix for library")
+# Use macros for creating tests/timing scripts
+set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
+include(GtsamTesting)
+include(GtsamPrinting)
+
# guard against in-source builds
if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
message(FATAL_ERROR "In-source builds not allowed. Please make a new directory (called a build directory) and run CMake from there. You may need to remove CMakeCache.txt. ")
endif()
-# guard against bad build-type strings
-if (NOT CMAKE_BUILD_TYPE)
- set(CMAKE_BUILD_TYPE "Debug")
+# Default to Debug mode
+if(NOT FIRST_PASS_DONE)
+ set(CMAKE_BUILD_TYPE "Debug" CACHE STRING
+ "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
+ FORCE)
endif()
+# Check build types
+if(${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_GREATER 2.8 OR ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_EQUAL 2.8)
+ set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS Debug Release Timing Profiling RelWithDebInfo MinSizeRel)
+endif()
string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
if( NOT cmake_build_type_tolower STREQUAL "debug"
AND NOT cmake_build_type_tolower STREQUAL "release"
+ AND NOT cmake_build_type_tolower STREQUAL "timing"
+ AND NOT cmake_build_type_tolower STREQUAL "profiling"
AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
- message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, RelWithDebInfo (case-insensitive).")
+ message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, Timing, Profiling, RelWithDebInfo (case-insensitive).")
endif()
-# Add debugging flags
-set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall")
-set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall")
-set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG -DEIGEN_NO_DEBUG")
-set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG -DEIGEN_NO_DEBUG")
-set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -DNDEBUG -Wall -DEIGEN_NO_DEBUG")
-set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -DNDEBUG -Wall -DEIGEN_NO_DEBUG")
+# Add debugging flags but only on the first pass
+if(NOT FIRST_PASS_DONE)
+ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -fno-inline -Wall" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
+ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fno-inline -Wall" CACHE STRING "Flags used by the compiler during debug builds." FORCE)
+ set(CMAKE_C_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
+ set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -fno-inline -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during relwithdebinfo builds." FORCE)
+ set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
+ set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -Wall" CACHE STRING "Flags used by the compiler during release builds." FORCE)
+ set(CMAKE_C_FLAGS_TIMING "${CMAKE_C_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
+ set(CMAKE_CXX_FLAGS_TIMING "${CMAKE_CXX_FLAGS_RELEASE} -DENABLE_TIMING" CACHE STRING "Flags used by the compiler during timing builds." FORCE)
+ mark_as_advanced(CMAKE_C_FLAGS_TIMING CMAKE_CXX_FLAGS_TIMING)
+ set(CMAKE_C_FLAGS_PROFILING "-g -O2 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
+ set(CMAKE_CXX_FLAGS_PROFILING "-g -O2 -Wall -DNDEBUG" CACHE STRING "Flags used by the compiler during profiling builds." FORCE)
+ mark_as_advanced(CMAKE_C_FLAGS_PROFILING CMAKE_CXX_FLAGS_PROFILING)
+endif()
# Configurable Options
option(GTSAM_BUILD_TESTS "Enable/Disable building of tests" ON)
@@ -41,6 +62,10 @@ option(GTSAM_BUILD_EXAMPLES "Enable/Disable building of examples" ON)
option(GTSAM_BUILD_WRAP "Enable/Disable building of matlab wrap utility (necessary for matlab interface)" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices" OFF)
option(GTSAM_BUILD_CONVENIENCE_LIBRARIES "Enable/Disable use of convenience libraries for faster development rebuilds, but slower install" ON)
+option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" ON)
+option(GTSAM_INSTALL_MATLAB_EXAMPLES "Enable/Disable installation of matlab examples" ON)
+option(GTSAM_INSTALL_MATLAB_TESTS "Enable/Disable installation of matlab tests" ON)
+option(GTSAM_INSTALL_WRAP "Enable/Disable installation of wrap utility" ON)
# Add the Quaternion Build Flag if requested
if (GTSAM_USE_QUATERNIONS)
@@ -59,10 +84,6 @@ if (GTSAM_BUILD_TESTS)
include(CTest)
endif()
-# Use macros for creating tests/timing scripts
-set(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
-include(GtsamTesting)
-
# Enable make check (http://www.cmake.org/Wiki/CMakeEmulateMakeCheck)
add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND})
add_custom_target(timing)
@@ -97,3 +118,48 @@ endif(GTSAM_BUILD_WRAP)
if (GTSAM_BUILD_EXAMPLES)
add_subdirectory(examples)
endif(GTSAM_BUILD_EXAMPLES)
+
+# Mark that first pass is done
+set(FIRST_PASS_DONE true CACHE BOOL "Internally used to mark whether cmake has been run multiple times")
+mark_as_advanced(FIRST_PASS_DONE)
+
+# print configuration variables
+message(STATUS "===============================================================")
+message(STATUS "================ Configuration Options ======================")
+message(STATUS "Build flags ")
+print_config_flag(${GTSAM_BUILD_TIMING} "Build Timing scripts ")
+print_config_flag(${GTSAM_BUILD_EXAMPLES} "Build Examples ")
+print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
+print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
+print_config_flag(${GTSAM_BUILD_CONVENIENCE_LIBRARIES} "Build Convenience Libraries")
+string(TOUPPER "${CMAKE_BUILD_TYPE}" cmake_build_type_toupper)
+message(STATUS " Build type : ${CMAKE_BUILD_TYPE}")
+message(STATUS " C compilation flags : ${CMAKE_C_FLAGS} ${CMAKE_C_FLAGS_${cmake_build_type_toupper}}")
+message(STATUS " C++ compilation flags : ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${cmake_build_type_toupper}}")
+
+message(STATUS "GTSAM flags ")
+print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3")
+
+message(STATUS "MATLAB toolbox flags ")
+print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
+print_config_flag(${GTSAM_INSTALL_MATLAB_EXAMPLES} "Install matlab examples ")
+print_config_flag(${GTSAM_INSTALL_MATLAB_TESTS} "Install matlab tests ")
+print_config_flag(${GTSAM_INSTALL_WRAP} "Install wrap utility ")
+message(STATUS "===============================================================")
+
+# Set up CPack
+set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "GTSAM")
+set(CPACK_PACKAGE_VENDOR "Frank Dellaert, Georgia Institute of Technology")
+set(CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/README")
+set(CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/LICENSE")
+set(CPACK_PACKAGE_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
+set(CPACK_PACKAGE_VERSION_MINOR ${GTSAM_VERSION_MINOR})
+set(CPACK_PACKAGE_VERSION_PATCH ${GTSAM_VERSION_PATCH})
+set(CPACK_PACKAGE_INSTALL_DIRECTORY "CMake ${CMake_VERSION_MAJOR}.${CMake_VERSION_MINOR}")
+set(CPACK_INSTALLED_DIRECTORIES "doc" ".") # Include doc directory
+set(CPACK_SOURCE_IGNORE_FILES "/build;/\\\\.;/makedoc.sh$")
+set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
+#set(CPACK_SOURCE_PACKAGE_FILE_NAME "gtsam-aspn${GTSAM_VERSION_PATCH}") # Used for creating ASPN tarballs
+set(CPACK_SOURCE_GENERATOR "TGZ")
+set(CPACK_GENERATOR "TGZ")
+include(CPack)
diff --git a/cmake/FindCppUnitLite.cmake b/cmake/FindCppUnitLite.cmake
index fba910218..57628b8fa 100644
--- a/cmake/FindCppUnitLite.cmake
+++ b/cmake/FindCppUnitLite.cmake
@@ -3,22 +3,19 @@
# The following variables will be defined:
#
# CppUnitLite_FOUND : TRUE if the package has been successfully found
-# CppUnitLite_INCLUDE_DIRS : paths to CppUnitLite's INCLUDE directories
+# CppUnitLite_INCLUDE_DIR : paths to CppUnitLite's INCLUDE directories
# CppUnitLite_LIBS : paths to CppUnitLite's libraries
-
# Find include dirs
find_path(_CppUnitLite_INCLUDE_DIR CppUnitLite/Test.h
- PATHS ${GTSAM_ROOT} ${CMAKE_INSTALL_PREFIX}/include ${HOME}/include /usr/local/include /usr/include )
+ PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
# Find libraries
find_library(_CppUnitLite_LIB NAMES CppUnitLite
- HINTS ${_CppUnitLite_INCLUDE_DIR}/build/CppUnitLite ${_CppUnitLite_INCLUDE_DIR}/CppUnitLite)
-
-set (CppUnitLite_INCLUDE_DIRS ${_CppUnitLite_INCLUDE_DIR})
-set (CppUnitLite_LIBS ${_CppUnitLite_LIB})
-
+ HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
+set (CppUnitLite_INCLUDE_DIR ${_CppUnitLite_INCLUDE_DIR} CACHE STRING "CppUnitLite INCLUDE directories")
+set (CppUnitLite_LIBS ${_CppUnitLite_LIB} CACHE STRING "CppUnitLite libraries")
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
# if all listed variables are TRUE
diff --git a/cmake/FindGTSAM.cmake b/cmake/FindGTSAM.cmake
index 052798184..6748ae462 100644
--- a/cmake/FindGTSAM.cmake
+++ b/cmake/FindGTSAM.cmake
@@ -3,23 +3,19 @@
# The following variables will be defined:
#
# GTSAM_FOUND : TRUE if the package has been successfully found
-# GTSAM_INCLUDE_DIRS : paths to GTSAM's INCLUDE directories
+# GTSAM_INCLUDE_DIR : paths to GTSAM's INCLUDE directories
# GTSAM_LIBS : paths to GTSAM's libraries
-
# Find include dirs
find_path(_gtsam_INCLUDE_DIR gtsam/inference/FactorGraph.h
- PATHS ${GTSAM_ROOT} ${CMAKE_INSTALL_PREFIX}/include ${HOME}/include /usr/local/include /usr/include )
+ PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
# Find libraries
find_library(_gtsam_LIB NAMES gtsam
- HINTS ${_gtsam_INCLUDE_DIR}/build-debug/gtsam/.libs ${_gtsam_INCLUDE_DIR}/build/gtsam/.libs ${_gtsam_INCLUDE_DIR}/gtsam/.libs
- NO_DEFAULT_PATH)
-
-set (GTSAM_INCLUDE_DIRS ${_gtsam_INCLUDE_DIR} CACHE STRING "GTSAM INCLUDE directories")
-set (GTSAM_LIBS ${_gtsam_LIB} CACHE STRING "GTSAM libraries")
-
+ HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
+set (GTSAM_INCLUDE_DIR ${_gtsam_INCLUDE_DIR} CACHE STRING "GTSAM INCLUDE directories")
+set (GTSAM_LIBS ${_gtsam_LIB} CACHE STRING "GTSAM libraries")
# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
# if all listed variables are TRUE
diff --git a/cmake/FindWrap.cmake b/cmake/FindWrap.cmake
new file mode 100644
index 000000000..733f6aa7d
--- /dev/null
+++ b/cmake/FindWrap.cmake
@@ -0,0 +1,30 @@
+# This is FindWrap.cmake
+# CMake module to locate the Wrap tool and header after installation package
+# The following variables will be defined:
+#
+# Wrap_FOUND : TRUE if the package has been successfully found
+# Wrap_CMD : command for executing wrap
+# Wrap_INCLUDE_DIR : paths to Wrap's INCLUDE directories
+
+# Find include dir
+find_path(_Wrap_INCLUDE_DIR wrap/matlab.h
+ PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
+
+# Find the installed executable
+find_program(_Wrap_CMD NAMES wrap
+ PATHS ${CMAKE_INSTALL_PREFIX}/bin "$ENV{HOME}/bin" /usr/local/bin /usr/bin )
+
+set (Wrap_INCLUDE_DIR ${_Wrap_INCLUDE_DIR} CACHE STRING "Wrap INCLUDE directories")
+set (Wrap_CMD ${_Wrap_CMD} CACHE STRING "Wrap executable location")
+
+# handle the QUIETLY and REQUIRED arguments and set LIBXML2_FOUND to TRUE
+# if all listed variables are TRUE
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(Wrap DEFAULT_MSG
+ _Wrap_INCLUDE_DIR _Wrap_CMD)
+
+mark_as_advanced(_Wrap_INCLUDE_DIR _Wrap_CMD )
+
+
+
+
diff --git a/cmake/GtsamMatlabWrap.cmake b/cmake/GtsamMatlabWrap.cmake
new file mode 100644
index 000000000..8c2098b1d
--- /dev/null
+++ b/cmake/GtsamMatlabWrap.cmake
@@ -0,0 +1,48 @@
+# Macros for using wrap functionality
+macro(find_mexextension)
+ ## Determine the mex extension
+ # Apple Macintosh (64-bit) mexmaci64
+ # Linux (32-bit) mexglx
+ # Linux (64-bit) mexa64
+ # Microsoft Windows (32-bit) mexw32
+ # Windows (64-bit) mexw64
+
+ # only support 64-bit apple
+ if(CMAKE_HOST_APPLE)
+ set(GTSAM_MEX_BIN_EXTENSION_default mexmaci64)
+ endif(CMAKE_HOST_APPLE)
+
+ if(NOT CMAKE_HOST_APPLE)
+ # check 64 bit
+ if( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
+ set( HAVE_64_BIT 0 )
+ endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
+
+ if( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
+ set( HAVE_64_BIT 1 )
+ endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
+
+ # Check for linux machines
+ if (CMAKE_HOST_UNIX)
+ if (HAVE_64_BIT)
+ set(GTSAM_MEX_BIN_EXTENSION_default mexa64)
+ else (HAVE_64_BIT)
+ set(GTSAM_MEX_BIN_EXTENSION_default mexglx)
+ endif (HAVE_64_BIT)
+ endif(CMAKE_HOST_UNIX)
+
+ # Check for windows machines
+ if (CMAKE_HOST_WIN32)
+ if (HAVE_64_BIT)
+ set(GTSAM_MEX_BIN_EXTENSION_default mexw64)
+ else (HAVE_64_BIT)
+ set(GTSAM_MEX_BIN_EXTENSION_default mexw32)
+ endif (HAVE_64_BIT)
+ endif(CMAKE_HOST_WIN32)
+ endif(NOT CMAKE_HOST_APPLE)
+
+ # Allow for setting mex extension manually
+ set(GTSAM_MEX_BIN_EXTENSION ${GTSAM_MEX_BIN_EXTENSION_default} CACHE DOCSTRING "Extension for matlab mex files")
+ message(STATUS "Detected Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION_default}")
+ message(STATUS "Current Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION}")
+endmacro(find_mexextension)
\ No newline at end of file
diff --git a/cmake/GtsamPrinting.cmake b/cmake/GtsamPrinting.cmake
new file mode 100644
index 000000000..b6f3ca4ef
--- /dev/null
+++ b/cmake/GtsamPrinting.cmake
@@ -0,0 +1,10 @@
+# print configuration variables
+# Usage:
+#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
+macro(print_config_flag flag msg)
+ if ("${flag}" STREQUAL "ON")
+ message(STATUS " ${msg}: Enabled")
+ else ("${flag}" STREQUAL "ON")
+ message(STATUS " ${msg}: Disabled")
+ endif ("${flag}" STREQUAL "ON")
+endmacro(print_config_flag)
\ No newline at end of file
diff --git a/cmake/GtsamTesting.cmake b/cmake/GtsamTesting.cmake
index f2f345f11..28683c98b 100644
--- a/cmake/GtsamTesting.cmake
+++ b/cmake/GtsamTesting.cmake
@@ -6,7 +6,7 @@ macro(gtsam_add_tests subdir libs)
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
- set( test_bin ${subdir}.${test_base} )
+ set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})
@@ -30,7 +30,7 @@ macro(gtsam_add_external_tests subdir libs)
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
- set( test_bin ${subdir}.${test_base} )
+ set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})
@@ -48,7 +48,7 @@ macro(gtsam_add_timing subdir libs)
file(GLOB base_timing_srcs "tests/time*.cpp")
foreach(time_src ${base_timing_srcs})
get_filename_component(time_base ${time_src} NAME_WE)
- set( time_bin ${subdir}.${time_base} )
+ set( time_bin ${time_base} )
message(STATUS "Adding Timing Benchmark ${time_bin}")
add_executable(${time_bin} ${time_src})
add_dependencies(timing.${subdir} ${time_bin})
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 27f85b2f7..55f2db60b 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -9,6 +9,7 @@ foreach(example_src ${example_srcs} )
add_dependencies(examples ${example_bin})
add_executable(${example_bin} ${example_src})
target_link_libraries(${example_bin} gtsam-static)
+ add_custom_target(${example_bin}.run ${EXECUTABLE_OUTPUT_PATH}${example_bin} ${ARGN})
endforeach(example_src)
add_subdirectory(vSLAMexample)
diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp
index 65b201ab7..10802ee87 100644
--- a/examples/PlanarSLAMSelfContained_advanced.cpp
+++ b/examples/PlanarSLAMSelfContained_advanced.cpp
@@ -39,13 +39,13 @@
#include
#include
-// Main typedefs
-typedef gtsam::NonlinearOptimizer OptimizerSeqential; // optimization engine for this domain
-typedef gtsam::NonlinearOptimizer OptimizerMultifrontal; // optimization engine for this domain
-
using namespace std;
using namespace gtsam;
+// Main typedefs
+typedef NonlinearOptimizer OptimizerSeqential; // optimization engine for this domain
+typedef NonlinearOptimizer OptimizerMultifrontal; // optimization engine for this domain
+
/**
* In this version of the system we make the following assumptions:
* - All values are axis aligned
diff --git a/examples/matlab/Pose2SLAMExample_easy.m b/examples/matlab/Pose2SLAMExample_easy.m
index bbafba99d..6c359f314 100644
--- a/examples/matlab/Pose2SLAMExample_easy.m
+++ b/examples/matlab/Pose2SLAMExample_easy.m
@@ -39,7 +39,7 @@ graph.addOdometry(x2, x3, odom_measurement, odom_model);
%% Add measurements
% general noisemodel for measurements
-meas_model = SharedNoiseModel_sharedSigmas([0.1; 0.2]);
+meas_model = gtsamSharedNoiseModel_Sigmas([0.1; 0.2]);
% print
graph.print('full graph');
diff --git a/gtsam.h b/gtsam.h
index 688fc4fa7..26f3457c7 100644
--- a/gtsam.h
+++ b/gtsam.h
@@ -214,7 +214,7 @@ class CalibratedCamera {
CalibratedCamera(const Vector& v);
void print(string s) const;
- bool equals(const gtsam::Pose3& pose, double tol) const;
+ bool equals(const gtsam::CalibratedCamera& camera, double tol) const;
gtsam::Pose3 pose() const;
@@ -404,7 +404,7 @@ class Ordering {
Ordering();
void print(string s) const;
bool equals(const gtsam::Ordering& ord, double tol) const;
- void push_back(string key);
+ void push_back(size_t key);
};
class NonlinearOptimizationParameters {
diff --git a/gtsam/base/LieScalar.h b/gtsam/base/LieScalar.h
index 51439c6f1..505f06921 100644
--- a/gtsam/base/LieScalar.h
+++ b/gtsam/base/LieScalar.h
@@ -17,6 +17,7 @@
#pragma once
+#include
#include
namespace gtsam {
@@ -24,7 +25,7 @@ namespace gtsam {
/**
* LieScalar is a wrapper around double to allow it to be a Lie type
*/
- struct LieScalar {
+ struct LieScalar : public DerivedValue {
/** default constructor */
LieScalar() : d_(0.0) {}
diff --git a/gtsam/base/TestableAssertions.h b/gtsam/base/TestableAssertions.h
index 46f738f7d..dad1c6d20 100644
--- a/gtsam/base/TestableAssertions.h
+++ b/gtsam/base/TestableAssertions.h
@@ -70,6 +70,14 @@ bool assert_equal(const V& expected, const boost::optional& actual, double to
return assert_equal(expected, *actual, tol);
}
+template
+bool assert_equal(const V& expected, const boost::optional& actual, double tol = 1e-9) {
+ if (!actual) {
+ std::cout << "actual is boost::none" << std::endl;
+ return false;
+ }
+ return assert_equal(expected, *actual, tol);
+}
/**
* Version of assert_equals to work with vectors
@@ -247,6 +255,45 @@ bool assert_container_equal(const V& expected, const V& actual, double tol = 1e-
return true;
}
+/**
+ * Function for comparing maps of size_t->testable
+ * Types are assumed to have operator ==
+ */
+template
+bool assert_container_equality(const std::map& expected, const std::map& actual) {
+ typedef typename std::map Map;
+ bool match = true;
+ if (expected.size() != actual.size())
+ match = false;
+ typename Map::const_iterator
+ itExp = expected.begin(),
+ itAct = actual.begin();
+ if(match) {
+ for (; itExp!=expected.end() && itAct!=actual.end(); ++itExp, ++itAct) {
+ if (itExp->first != itAct->first || itExp->second != itAct->second) {
+ match = false;
+ break;
+ }
+ }
+ }
+ if(!match) {
+ std::cout << "expected: " << std::endl;
+ BOOST_FOREACH(const typename Map::value_type& a, expected) {
+ std::cout << "Key: " << a.first << std::endl;
+ std::cout << "Value: " << a.second << std::endl;
+ }
+ std::cout << "\nactual: " << std::endl;
+ BOOST_FOREACH(const typename Map::value_type& a, actual) {
+ std::cout << "Key: " << a.first << std::endl;
+ std::cout << "Value: " << a.second << std::endl;
+ }
+ std::cout << std::endl;
+ return false;
+ }
+ return true;
+}
+
+
/**
* General function for comparing containers of objects with operator==
*/
diff --git a/gtsam/inference/BayesNet.h b/gtsam/inference/BayesNet.h
index 684e28ae0..a05440d69 100644
--- a/gtsam/inference/BayesNet.h
+++ b/gtsam/inference/BayesNet.h
@@ -74,6 +74,12 @@ public:
/** Default constructor as an empty BayesNet */
BayesNet() {};
+ /** convert from a derived type */
+ template
+ BayesNet(const BayesNet& bn) {
+ conditionals_.assign(bn.begin(), bn.end());
+ }
+
/** BayesNet with 1 conditional */
BayesNet(const sharedConditional& conditional) { push_back(conditional); }
diff --git a/gtsam/inference/BayesTree-inl.h b/gtsam/inference/BayesTree-inl.h
index 23148cea8..95995a4e6 100644
--- a/gtsam/inference/BayesTree-inl.h
+++ b/gtsam/inference/BayesTree-inl.h
@@ -234,15 +234,56 @@ namespace gtsam {
}
}
- /* ************************************************************************* */
+ /* ************************************************************************* */
template
- BayesTree::BayesTree() {
+ void BayesTree::recursiveTreeBuild(const boost::shared_ptr >& symbolic,
+ const std::vector >& conditionals,
+ const typename BayesTree::sharedClique& parent) {
+
+ // Helper function to build a non-symbolic tree (e.g. Gaussian) using a
+ // symbolic tree, used in the BT(BN) constructor.
+
+ // Build the current clique
+ FastList cliqueConditionals;
+ BOOST_FOREACH(Index j, symbolic->conditional()->frontals()) {
+ cliqueConditionals.push_back(conditionals[j]); }
+ typename BayesTree::sharedClique thisClique(new CLIQUE(CONDITIONAL::Combine(cliqueConditionals.begin(), cliqueConditionals.end())));
+
+ // Add the new clique with the current parent
+ this->addClique(thisClique, parent);
+
+ // Build the children, whose parent is the new clique
+ BOOST_FOREACH(const BayesTree::sharedClique& child, symbolic->children()) {
+ this->recursiveTreeBuild(child, conditionals, thisClique); }
+ }
+
+ /* ************************************************************************* */
+ template
+ BayesTree::BayesTree(const BayesNet& bayesNet) {
+ // First generate symbolic BT to determine clique structure
+ BayesTree sbt(bayesNet);
+
+ // Build index of variables to conditionals
+ std::vector > conditionals(sbt.root()->conditional()->frontals().back() + 1);
+ BOOST_FOREACH(const boost::shared_ptr& c, bayesNet) {
+ if(c->nrFrontals() != 1)
+ throw std::invalid_argument("BayesTree constructor from BayesNet only supports single frontal variable conditionals");
+ if(c->firstFrontalKey() >= conditionals.size())
+ throw std::invalid_argument("An inconsistent BayesNet was passed into the BayesTree constructor!");
+ if(conditionals[c->firstFrontalKey()])
+ throw std::invalid_argument("An inconsistent BayesNet with duplicate frontal variables was passed into the BayesTree constructor!");
+
+ conditionals[c->firstFrontalKey()] = c;
+ }
+
+ // Build the new tree
+ this->recursiveTreeBuild(sbt.root(), conditionals, sharedClique());
}
/* ************************************************************************* */
- template
- BayesTree::BayesTree(const BayesNet& bayesNet) {
- typename BayesNet::const_reverse_iterator rit;
+ template<>
+ inline BayesTree::BayesTree(const BayesNet& bayesNet) {
+ BayesNet::const_reverse_iterator rit;
for ( rit=bayesNet.rbegin(); rit != bayesNet.rend(); ++rit )
insert(*this, *rit);
}
@@ -311,19 +352,6 @@ namespace gtsam {
typename CONTAINER::const_iterator lowestOrderedParent = min_element(parents.begin(), parents.end());
assert(lowestOrderedParent != parents.end());
return *lowestOrderedParent;
-
-// boost::optional parentCliqueRepresentative;
-// boost::optional lowest;
-// BOOST_FOREACH(Index p, parents) {
-// size_t i = index(p);
-// if (!lowest || i<*lowest) {
-// lowest.reset(i);
-// parentCliqueRepresentative.reset(p);
-// }
-// }
-// if (!lowest) throw
-// invalid_argument("BayesTree::findParentClique: no parents given or key not present in index");
-// return *parentCliqueRepresentative;
}
/* ************************************************************************* */
diff --git a/gtsam/inference/BayesTree.h b/gtsam/inference/BayesTree.h
index 87788e3d8..06a47ba8e 100644
--- a/gtsam/inference/BayesTree.h
+++ b/gtsam/inference/BayesTree.h
@@ -31,6 +31,7 @@
#include
#include
#include
+#include
#include
namespace gtsam {
@@ -127,15 +128,22 @@ namespace gtsam {
/** Fill the nodes index for a subtree */
void fillNodesIndex(const sharedClique& subtree);
+ /** Helper function to build a non-symbolic tree (e.g. Gaussian) using a
+ * symbolic tree, used in the BT(BN) constructor.
+ */
+ void recursiveTreeBuild(const boost::shared_ptr >& symbolic,
+ const std::vector >& conditionals,
+ const typename BayesTree::sharedClique& parent);
+
public:
/// @name Standard Constructors
/// @{
/** Create an empty Bayes Tree */
- BayesTree();
+ BayesTree() {}
- /** Create a Bayes Tree from a Bayes Net */
+ /** Create a Bayes Tree from a Bayes Net (requires CONDITIONAL is IndexConditional *or* CONDITIONAL::Combine) */
BayesTree(const BayesNet& bayesNet);
/// @}
diff --git a/gtsam/inference/BayesTreeCliqueBase.h b/gtsam/inference/BayesTreeCliqueBase.h
index 08427bae4..82cec6f76 100644
--- a/gtsam/inference/BayesTreeCliqueBase.h
+++ b/gtsam/inference/BayesTreeCliqueBase.h
@@ -19,6 +19,7 @@
#include
#include
+#include
#include
#include
diff --git a/gtsam/inference/Factor-inl.h b/gtsam/inference/Factor-inl.h
index d05cc1de3..d9fb1e0de 100644
--- a/gtsam/inference/Factor-inl.h
+++ b/gtsam/inference/Factor-inl.h
@@ -45,12 +45,12 @@ namespace gtsam {
void Factor::assertInvariants() const {
#ifndef NDEBUG
// Check that keys are all unique
- std::multiset < KeyType > nonunique(keys_.begin(), keys_.end());
- std::set < KeyType > unique(keys_.begin(), keys_.end());
+ std::multiset nonunique(keys_.begin(), keys_.end());
+ std::set unique(keys_.begin(), keys_.end());
bool correct = (nonunique.size() == unique.size())
&& std::equal(nonunique.begin(), nonunique.end(), unique.begin());
if (!correct) throw std::logic_error(
- "Factor::assertInvariants: detected inconsistency");
+ "Factor::assertInvariants: Factor contains duplicate keys");
#endif
}
diff --git a/gtsam/inference/Factor.h b/gtsam/inference/Factor.h
index 962bf44d1..87c1f3860 100644
--- a/gtsam/inference/Factor.h
+++ b/gtsam/inference/Factor.h
@@ -117,6 +117,14 @@ public:
Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4) : keys_(4) {
keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; assertInvariants(); }
+ /** Construct 5-way factor */
+ Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4, KeyType key5) : keys_(5) {
+ keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; keys_[4] = key5; assertInvariants(); }
+
+ /** Construct 6-way factor */
+ Factor(KeyType key1, KeyType key2, KeyType key3, KeyType key4, KeyType key5, KeyType key6) : keys_(6) {
+ keys_[0] = key1; keys_[1] = key2; keys_[2] = key3; keys_[3] = key4; keys_[4] = key5; keys_[5] = key6; assertInvariants(); }
+
/// @}
/// @name Advanced Constructors
/// @{
diff --git a/gtsam/inference/FactorGraph.h b/gtsam/inference/FactorGraph.h
index 12855e0c0..33cdd02a1 100644
--- a/gtsam/inference/FactorGraph.h
+++ b/gtsam/inference/FactorGraph.h
@@ -43,6 +43,7 @@ template class BayesTree;
class FactorGraph {
public:
typedef FACTOR FactorType;
+ typedef typename FACTOR::KeyType KeyType;
typedef boost::shared_ptr > shared_ptr;
typedef typename boost::shared_ptr sharedFactor;
typedef typename std::vector::iterator iterator;
@@ -56,6 +57,11 @@ template class BayesTree;
/** typedef for an eliminate subroutine */
typedef boost::function&, size_t)> Eliminate;
+ /** Typedef for the result of factorization */
+ typedef std::pair<
+ boost::shared_ptr,
+ FactorGraph > FactorizationResult;
+
protected:
/** concept check */
@@ -87,7 +93,7 @@ template class BayesTree;
/** convert from a derived type */
template
FactorGraph(const FactorGraph& factors) {
- factors_.insert(end(), factors.begin(), factors.end());
+ factors_.assign(factors.begin(), factors.end());
}
/// @}
diff --git a/gtsam/inference/GenericSequentialSolver-inl.h b/gtsam/inference/GenericSequentialSolver-inl.h
index 04e8cfbeb..b2629566b 100644
--- a/gtsam/inference/GenericSequentialSolver-inl.h
+++ b/gtsam/inference/GenericSequentialSolver-inl.h
@@ -89,7 +89,7 @@ namespace gtsam {
const std::vector& js, Eliminate function) const {
// Compute a COLAMD permutation with the marginal variable constrained to the end.
- Permutation::shared_ptr permutation(Inference::PermutationCOLAMD(*structure_, js));
+ Permutation::shared_ptr permutation(inference::PermutationCOLAMD(*structure_, js));
Permutation::shared_ptr permutationInverse(permutation->inverse());
// Permute the factors - NOTE that this permutes the original factors, not
diff --git a/gtsam/inference/IndexConditional.cpp b/gtsam/inference/IndexConditional.cpp
index 58d5f93d1..9830782f3 100644
--- a/gtsam/inference/IndexConditional.cpp
+++ b/gtsam/inference/IndexConditional.cpp
@@ -31,12 +31,12 @@ namespace gtsam {
// Checks for uniqueness of keys
Base::assertInvariants();
#ifndef NDEBUG
- // Check that separator keys are sorted
- FastSet uniquesorted(beginFrontals(), endFrontals());
- assert(uniquesorted.size() == nrFrontals() && std::equal(uniquesorted.begin(), uniquesorted.end(), beginFrontals()));
- // Check that separator keys are less than parent keys
- //BOOST_FOREACH(Index j, frontals()) {
- // assert(find_if(beginParents(), endParents(), _1 < j) == endParents()); }
+ // Check that frontal keys are sorted
+ //FastSet uniquesorted(beginFrontals(), endFrontals());
+ //assert(uniquesorted.size() == nrFrontals() && std::equal(uniquesorted.begin(), uniquesorted.end(), beginFrontals()));
+ //// Check that separator keys are less than parent keys
+ ////BOOST_FOREACH(Index j, frontals()) {
+ //// assert(find_if(beginParents(), endParents(), _1 < j) == endParents()); }
#endif
}
@@ -60,13 +60,13 @@ namespace gtsam {
/* ************************************************************************* */
void IndexConditional::permuteWithInverse(const Permutation& inversePermutation) {
// The permutation may not move the separators into the frontals
- #ifndef NDEBUG
- BOOST_FOREACH(const KeyType frontal, this->frontals()) {
- BOOST_FOREACH(const KeyType separator, this->parents()) {
- assert(inversePermutation[frontal] < inversePermutation[separator]);
- }
- }
- #endif
+// #ifndef NDEBUG
+// BOOST_FOREACH(const KeyType frontal, this->frontals()) {
+// BOOST_FOREACH(const KeyType separator, this->parents()) {
+// assert(inversePermutation[frontal] < inversePermutation[separator]);
+// }
+// }
+// #endif
BOOST_FOREACH(Index& key, keys())
key = inversePermutation[key];
assertInvariants();
diff --git a/gtsam/inference/Permutation.h b/gtsam/inference/Permutation.h
index 5cbd8e187..dc3aa669a 100644
--- a/gtsam/inference/Permutation.h
+++ b/gtsam/inference/Permutation.h
@@ -26,8 +26,6 @@
namespace gtsam {
-class Inference;
-
/**
* A permutation reorders variables, for example to reduce fill-in during
* elimination. To save computation, the permutation can be applied to
@@ -162,8 +160,6 @@ protected:
void check(Index variable) const { assert(variable < rangeIndices_.size()); }
/// @}
-
- friend class Inference;
};
diff --git a/gtsam/inference/VariableIndex.h b/gtsam/inference/VariableIndex.h
index fc4fde620..878f6a85a 100644
--- a/gtsam/inference/VariableIndex.h
+++ b/gtsam/inference/VariableIndex.h
@@ -26,8 +26,6 @@
namespace gtsam {
-class Inference;
-
/**
* The VariableIndex class computes and stores the block column structure of a
* factor graph. The factor graph stores a collection of factors, each of
diff --git a/gtsam/inference/inference-inl.h b/gtsam/inference/inference-inl.h
new file mode 100644
index 000000000..02e78e8e9
--- /dev/null
+++ b/gtsam/inference/inference-inl.h
@@ -0,0 +1,120 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file inference-inl.h
+ * @brief Inference algorithm implementations: COLAMD variable ordering and generic elimination
+ * @author Richard Roberts
+ * @date Mar 3, 2012
+ */
+
+#pragma once
+
+#include
+
+// Only for Eclipse parser, inference-inl.h (this file) is included at the bottom of inference.h
+#include
+
+#include
+
+namespace gtsam {
+
+namespace inference {
+
+/* ************************************************************************* */
+template
+Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast) {
+
+ std::vector cmember(variableIndex.size(), 0);
+
+ // If at least some variables are not constrained to be last, constrain the
+ // ones that should be constrained.
+ if(constrainLast.size() < variableIndex.size()) {
+ BOOST_FOREACH(Index var, constrainLast) {
+ assert(var < variableIndex.size());
+ cmember[var] = 1;
+ }
+ }
+
+ return PermutationCOLAMD_(variableIndex, cmember);
+}
+
+/* ************************************************************************* */
+inline Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex) {
+ std::vector cmember(variableIndex.size(), 0);
+ return PermutationCOLAMD_(variableIndex, cmember);
+}
+
+/* ************************************************************************* */
+template
+typename Graph::FactorizationResult eliminate(const Graph& factorGraph, const std::vector& variables,
+ const typename Graph::Eliminate& eliminateFcn, boost::optional variableIndex_) {
+
+ const VariableIndex& variableIndex = variableIndex_ ? *variableIndex_ : VariableIndex(factorGraph);
+
+ // First find the involved factors
+ Graph involvedFactors;
+ Index highestInvolvedVariable = 0; // Largest index of the variables in the involved factors
+
+ // First get the indices of the involved factors, but uniquely in a set
+ FastSet involvedFactorIndices;
+ BOOST_FOREACH(Index variable, variables) {
+ involvedFactorIndices.insert(variableIndex[variable].begin(), variableIndex[variable].end()); }
+
+ // Add the factors themselves to involvedFactors and update largest index
+ involvedFactors.reserve(involvedFactorIndices.size());
+ BOOST_FOREACH(size_t factorIndex, involvedFactorIndices) {
+ const typename Graph::sharedFactor factor = factorGraph[factorIndex];
+ involvedFactors.push_back(factor); // Add involved factor
+ highestInvolvedVariable = std::max( // Updated largest index
+ highestInvolvedVariable,
+ *std::max_element(factor->begin(), factor->end()));
+ }
+
+ // Now permute the variables to be eliminated to the front of the ordering
+ Permutation toFront = Permutation::PullToFront(variables, highestInvolvedVariable+1);
+ Permutation toFrontInverse = *toFront.inverse();
+ involvedFactors.permuteWithInverse(toFrontInverse);
+
+ // Eliminate into conditional and remaining factor
+ typename Graph::EliminationResult eliminated = eliminateFcn(involvedFactors, variables.size());
+ boost::shared_ptr conditional = eliminated.first;
+ typename Graph::sharedFactor remainingFactor = eliminated.second;
+
+ // Undo the permutation
+ conditional->permuteWithInverse(toFront);
+ remainingFactor->permuteWithInverse(toFront);
+
+ // Build the remaining graph, without the removed factors
+ Graph remainingGraph;
+ remainingGraph.reserve(factorGraph.size() - involvedFactors.size() + 1);
+ FastSet::const_iterator involvedFactorIndexIt = involvedFactorIndices.begin();
+ for(size_t i = 0; i < factorGraph.size(); ++i) {
+ if(involvedFactorIndexIt != involvedFactorIndices.end() && *involvedFactorIndexIt == i)
+ ++ involvedFactorIndexIt;
+ else
+ remainingGraph.push_back(factorGraph[i]);
+ }
+
+ // Add the remaining factor if it is not empty.
+ if(remainingFactor->size() != 0)
+ remainingGraph.push_back(remainingFactor);
+
+ return typename Graph::FactorizationResult(conditional, remainingGraph);
+
+}
+
+
+}
+
+}
+
+
diff --git a/gtsam/inference/inference.cpp b/gtsam/inference/inference.cpp
index d30ea79c4..e1c250fcf 100644
--- a/gtsam/inference/inference.cpp
+++ b/gtsam/inference/inference.cpp
@@ -29,7 +29,9 @@ using namespace std;
namespace gtsam {
-Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector& cmember) {
+namespace inference {
+
+Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector& cmember) {
size_t nEntries = variableIndex.nEntries(), nFactors = variableIndex.nFactors(), nVars = variableIndex.size();
// Convert to compressed column major format colamd wants it in (== MATLAB format!)
int Alen = ccolamd_recommended(nEntries, nFactors, nVars); /* colamd arg 3: size of the array A */
@@ -79,10 +81,10 @@ Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& varia
// Convert elimination ordering in p to an ordering
Permutation::shared_ptr permutation(new Permutation(nVars));
for (Index j = 0; j < nVars; j++) {
-// if(p[j] == -1)
-// permutation->operator[](j) = j;
-// else
- permutation->operator[](j) = p[j];
+ // if(p[j] == -1)
+ // permutation->operator[](j) = j;
+ // else
+ permutation->operator[](j) = p[j];
if(debug) cout << "COLAMD: " << j << "->" << p[j] << endl;
}
if(debug) cout << "COLAMD: p[" << nVars << "] = " << p[nVars] << endl;
@@ -91,3 +93,5 @@ Permutation::shared_ptr Inference::PermutationCOLAMD_(const VariableIndex& varia
}
}
+
+}
diff --git a/gtsam/inference/inference.h b/gtsam/inference/inference.h
index d853ff91b..d87720b5a 100644
--- a/gtsam/inference/inference.h
+++ b/gtsam/inference/inference.h
@@ -22,58 +22,59 @@
#include
#include
+#include
#include
namespace gtsam {
- class Inference {
- private:
- /* Static members only, private constructor */
- Inference() {}
+namespace inference {
- public:
+/**
+ * Compute a permutation (variable ordering) using colamd
+ */
+Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex);
- /**
- * Compute a permutation (variable ordering) using colamd
- */
- static Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex);
+/**
+ * Compute a permutation (variable ordering) using constrained colamd
+ */
+template
+Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast);
- /**
- * Compute a permutation (variable ordering) using constrained colamd
- */
- template
- static Permutation::shared_ptr PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast);
+/**
+ * Compute a CCOLAMD permutation using the constraint groups in cmember.
+ */
+Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector& cmember);
- /**
- * Compute a CCOLAMD permutation using the constraint groups in cmember.
- */
- static Permutation::shared_ptr PermutationCOLAMD_(const VariableIndex& variableIndex, std::vector& cmember);
+/** Factor the factor graph into a conditional and a remaining factor graph.
+ * Given the factor graph \f$ f(X) \f$, and \c variables to factorize out
+ * \f$ V \f$, this function factorizes into \f$ f(X) = f(V;Y)f(Y) \f$, where
+ * \f$ Y := X \setminus V \f$ are the remaining variables. If \f$ f(X) = p(X) \f$ is
+ * a probability density or likelihood, the factorization produces a
+ * conditional probability density and a marginal \f$ p(X) = p(V|Y)p(Y) \f$.
+ *
+ * For efficiency, this function treats the variables to eliminate
+ * \c variables as fully-connected, so produces a dense (fully-connected)
+ * conditional on all of the variables in \c variables, instead of a sparse
+ * BayesNet. If the variables are not fully-connected, it is more efficient
+ * to sequentially factorize multiple times.
+ */
+template
+typename Graph::FactorizationResult eliminate(const Graph& factorGraph, const std::vector& variables,
+ const typename Graph::Eliminate& eliminateFcn, boost::optional variableIndex = boost::none);
- };
+/** Eliminate a single variable, by calling
+ * eliminate(const Graph&, const std::vector&, const typename Graph::Eliminate&, boost::optional)
+ */
+template
+typename Graph::FactorizationResult eliminateOne(const Graph& factorGraph, typename Graph::KeyType variable,
+ const typename Graph::Eliminate& eliminateFcn, boost::optional variableIndex = boost::none) {
+ std::vector variables(1, variable);
+ return eliminate(factorGraph, variables, eliminateFcn, variableIndex);
+}
- /* ************************************************************************* */
- template
- Permutation::shared_ptr Inference::PermutationCOLAMD(const VariableIndex& variableIndex, const CONSTRAINED& constrainLast) {
-
- std::vector cmember(variableIndex.size(), 0);
-
- // If at least some variables are not constrained to be last, constrain the
- // ones that should be constrained.
- if(constrainLast.size() < variableIndex.size()) {
- BOOST_FOREACH(Index var, constrainLast) {
- assert(var < variableIndex.size());
- cmember[var] = 1;
- }
- }
-
- return PermutationCOLAMD_(variableIndex, cmember);
- }
-
- /* ************************************************************************* */
- inline Permutation::shared_ptr Inference::PermutationCOLAMD(const VariableIndex& variableIndex) {
- std::vector cmember(variableIndex.size(), 0);
- return PermutationCOLAMD_(variableIndex, cmember);
- }
+}
} // namespace gtsam
+
+#include
diff --git a/gtsam/inference/tests/testInference.cpp b/gtsam/inference/tests/testInference.cpp
index 697ee9ec2..41c33a456 100644
--- a/gtsam/inference/tests/testInference.cpp
+++ b/gtsam/inference/tests/testInference.cpp
@@ -25,7 +25,7 @@
using namespace gtsam;
/* ************************************************************************* */
-TEST(Inference, UnobservedVariables) {
+TEST(inference, UnobservedVariables) {
SymbolicFactorGraph sfg;
// Create a factor graph that skips some variables
@@ -35,7 +35,7 @@ TEST(Inference, UnobservedVariables) {
VariableIndex variableIndex(sfg);
- Permutation::shared_ptr colamd(Inference::PermutationCOLAMD(variableIndex));
+ Permutation::shared_ptr colamd(inference::PermutationCOLAMD(variableIndex));
}
/* ************************************************************************* */
diff --git a/gtsam/linear/GaussianBayesNet.cpp b/gtsam/linear/GaussianBayesNet.cpp
index e05fbe45c..5721a4760 100644
--- a/gtsam/linear/GaussianBayesNet.cpp
+++ b/gtsam/linear/GaussianBayesNet.cpp
@@ -83,33 +83,14 @@ boost::shared_ptr allocateVectorValues(const GaussianBayesNet& bn)
/* ************************************************************************* */
VectorValues optimize(const GaussianBayesNet& bn) {
- return *optimize_(bn);
-}
-
-/* ************************************************************************* */
-boost::shared_ptr optimize_(const GaussianBayesNet& bn)
-{
- // get the RHS as a VectorValues to initialize system
- boost::shared_ptr result(new VectorValues(rhs(bn)));
-
- /** solve each node in turn in topological sort order (parents first)*/
- BOOST_REVERSE_FOREACH(GaussianConditional::shared_ptr cg, bn)
- cg->solveInPlace(*result); // solve and store solution in same step
-
- return result;
-}
-
-/* ************************************************************************* */
-VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y) {
- VectorValues x(y);
- backSubstituteInPlace(bn,x);
+ VectorValues x = *allocateVectorValues(bn);
+ optimizeInPlace(bn, x);
return x;
}
/* ************************************************************************* */
// (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)
-void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y) {
- VectorValues& x = y;
+void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x) {
/** solve each node in turn in topological sort order (parents first)*/
BOOST_REVERSE_FOREACH(const boost::shared_ptr cg, bn) {
// i^th part of R*x=y, x=inv(R)*y
@@ -141,6 +122,42 @@ VectorValues backSubstituteTranspose(const GaussianBayesNet& bn,
return gy;
}
+/* ************************************************************************* */
+VectorValues optimizeGradientSearch(const GaussianBayesNet& Rd) {
+ tic(0, "Allocate VectorValues");
+ VectorValues grad = *allocateVectorValues(Rd);
+ toc(0, "Allocate VectorValues");
+
+ optimizeGradientSearchInPlace(Rd, grad);
+
+ return grad;
+}
+
+/* ************************************************************************* */
+void optimizeGradientSearchInPlace(const GaussianBayesNet& Rd, VectorValues& grad) {
+ tic(1, "Compute Gradient");
+ // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
+ gradientAtZero(Rd, grad);
+ double gradientSqNorm = grad.dot(grad);
+ toc(1, "Compute Gradient");
+
+ tic(2, "Compute R*g");
+ // Compute R * g
+ FactorGraph Rd_jfg(Rd);
+ Errors Rg = Rd_jfg * grad;
+ toc(2, "Compute R*g");
+
+ tic(3, "Compute minimizing step size");
+ // Compute minimizing step size
+ double step = -gradientSqNorm / dot(Rg, Rg);
+ toc(3, "Compute minimizing step size");
+
+ tic(4, "Compute point");
+ // Compute steepest descent point
+ scal(step, grad);
+ toc(4, "Compute point");
+}
+
/* ************************************************************************* */
pair matrix(const GaussianBayesNet& bn) {
@@ -194,15 +211,6 @@ pair matrix(const GaussianBayesNet& bn) {
return make_pair(R,d);
}
-/* ************************************************************************* */
-VectorValues rhs(const GaussianBayesNet& bn) {
- boost::shared_ptr result(allocateVectorValues(bn));
- BOOST_FOREACH(boost::shared_ptr cg,bn)
- cg->rhs(*result);
-
- return *result;
-}
-
/* ************************************************************************* */
double determinant(const GaussianBayesNet& bayesNet) {
double logDet = 0.0;
diff --git a/gtsam/linear/GaussianBayesNet.h b/gtsam/linear/GaussianBayesNet.h
index 455536a4f..1ef9081d4 100644
--- a/gtsam/linear/GaussianBayesNet.h
+++ b/gtsam/linear/GaussianBayesNet.h
@@ -55,26 +55,56 @@ namespace gtsam {
boost::shared_ptr allocateVectorValues(const GaussianBayesNet& bn);
/**
- * optimize, i.e. return x = inv(R)*d
+ * Solve the GaussianBayesNet, i.e. return \f$ x = R^{-1}*d \f$, computed by
+ * back-substitution.
*/
- VectorValues optimize(const GaussianBayesNet&);
+ VectorValues optimize(const GaussianBayesNet& bn);
- /**
- * shared pointer version
- */
- boost::shared_ptr optimize_(const GaussianBayesNet& bn);
+ /**
+ * Solve the GaussianBayesNet, i.e. return \f$ x = R^{-1}*d \f$, computed by
+ * back-substitution, writes the solution \f$ x \f$ into a pre-allocated
+ * VectorValues. You can use allocateVectorValues(const GaussianBayesNet&)
+ * allocate it. See also optimize(const GaussianBayesNet&), which does not
+ * require pre-allocation.
+ */
+ void optimizeInPlace(const GaussianBayesNet& bn, VectorValues& x);
- /**
- * Backsubstitute
- * (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)
- * @param y is the RHS of the system
- */
- VectorValues backSubstitute(const GaussianBayesNet& bn, const VectorValues& y);
+ /**
+ * Optimize along the gradient direction, with a closed-form computation to
+ * perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
+ *
+ * This function returns \f$ \delta x \f$ that minimizes a reparametrized
+ * problem. The error function of a GaussianBayesNet is
+ *
+ * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
+ *
+ * with gradient and Hessian
+ *
+ * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
+ *
+ * This function performs the line search in the direction of the
+ * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
+ * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
+ *
+ * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
+ *
+ * Optimizing by setting the derivative to zero yields
+ * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
+ * evaluates the denominator without computing the Hessian \f$ G \f$, returning
+ *
+ * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
+ *
+ * @param bn The GaussianBayesNet on which to perform this computation
+ * @return The resulting \f$ \delta x \f$ as described above
+ */
+ VectorValues optimizeGradientSearch(const GaussianBayesNet& bn);
- /**
- * Backsubstitute in place, y starts as RHS and is replaced with solution
- */
- void backSubstituteInPlace(const GaussianBayesNet& bn, VectorValues& y);
+ /** In-place version of optimizeGradientSearch(const GaussianBayesNet&) requiring pre-allocated VectorValues \c grad
+ *
+ * @param bn The GaussianBayesNet on which to perform this computation
+ * @param [out] grad The resulting \f$ \delta x \f$ as described in optimizeGradientSearch(const GaussianBayesNet&)
+ * */
+ void optimizeGradientSearchInPlace(const GaussianBayesNet& bn, VectorValues& grad);
/**
* Transpose Backsubstitute
@@ -91,12 +121,6 @@ namespace gtsam {
*/
std::pair matrix(const GaussianBayesNet&);
- /**
- * Return RHS d as a VectorValues
- * Such that backSubstitute(bn,d) = optimize(bn)
- */
- VectorValues rhs(const GaussianBayesNet&);
-
/**
* Computes the determinant of a GassianBayesNet
* A GaussianBayesNet is an upper triangular matrix and for an upper triangular matrix
diff --git a/gtsam/linear/GaussianBayesTree-inl.h b/gtsam/linear/GaussianBayesTree-inl.h
new file mode 100644
index 000000000..9cb327bec
--- /dev/null
+++ b/gtsam/linear/GaussianBayesTree-inl.h
@@ -0,0 +1,28 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussianBayesTree-inl.h
+ * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
+ * @brief GaussianBayesTree
+ * @author Frank Dellaert
+ * @author Richard Roberts
+ */
+
+#pragma once
+
+#include
+
+#include // Only to help Eclipse
+
+namespace gtsam {
+
+}
diff --git a/gtsam/linear/GaussianBayesTree.cpp b/gtsam/linear/GaussianBayesTree.cpp
new file mode 100644
index 000000000..4243657e5
--- /dev/null
+++ b/gtsam/linear/GaussianBayesTree.cpp
@@ -0,0 +1,99 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussianBayesTree.cpp
+ * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
+ * @brief GaussianBayesTree
+ * @author Frank Dellaert
+ * @author Richard Roberts
+ */
+
+#include
+#include
+
+namespace gtsam {
+
+/* ************************************************************************* */
+namespace internal {
+void optimizeInPlace(const boost::shared_ptr >& clique, VectorValues& result) {
+ // parents are assumed to already be solved and available in result
+ clique->conditional()->solveInPlace(result);
+
+ // starting from the root, call optimize on each conditional
+ BOOST_FOREACH(const boost::shared_ptr >& child, clique->children_)
+ optimizeInPlace(child, result);
+}
+}
+
+/* ************************************************************************* */
+VectorValues optimize(const GaussianBayesTree& bayesTree) {
+ VectorValues result = *allocateVectorValues(bayesTree);
+ internal::optimizeInPlace(bayesTree.root(), result);
+ return result;
+}
+
+/* ************************************************************************* */
+VectorValues optimizeGradientSearch(const GaussianBayesTree& Rd) {
+ tic(0, "Allocate VectorValues");
+ VectorValues grad = *allocateVectorValues(Rd);
+ toc(0, "Allocate VectorValues");
+
+ optimizeGradientSearchInPlace(Rd, grad);
+
+ return grad;
+}
+
+/* ************************************************************************* */
+void optimizeGradientSearchInPlace(const GaussianBayesTree& Rd, VectorValues& grad) {
+ tic(1, "Compute Gradient");
+ // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
+ gradientAtZero(Rd, grad);
+ double gradientSqNorm = grad.dot(grad);
+ toc(1, "Compute Gradient");
+
+ tic(2, "Compute R*g");
+ // Compute R * g
+ FactorGraph Rd_jfg(Rd);
+ Errors Rg = Rd_jfg * grad;
+ toc(2, "Compute R*g");
+
+ tic(3, "Compute minimizing step size");
+ // Compute minimizing step size
+ double step = -gradientSqNorm / dot(Rg, Rg);
+ toc(3, "Compute minimizing step size");
+
+ tic(4, "Compute point");
+ // Compute steepest descent point
+ scal(step, grad);
+ toc(4, "Compute point");
+}
+
+/* ************************************************************************* */
+void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) {
+ internal::optimizeInPlace(bayesTree.root(), result);
+}
+
+/* ************************************************************************* */
+VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0) {
+ return gradient(FactorGraph(bayesTree), x0);
+}
+
+/* ************************************************************************* */
+void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g) {
+ gradientAtZero(FactorGraph(bayesTree), g);
+}
+
+}
+
+
+
+
diff --git a/gtsam/linear/GaussianBayesTree.h b/gtsam/linear/GaussianBayesTree.h
new file mode 100644
index 000000000..bb15eb063
--- /dev/null
+++ b/gtsam/linear/GaussianBayesTree.h
@@ -0,0 +1,95 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file GaussianBayesTree.h
+ * @brief Gaussian Bayes Tree, the result of eliminating a GaussianJunctionTree
+ * @brief GaussianBayesTree
+ * @author Frank Dellaert
+ * @author Richard Roberts
+ */
+
+#pragma once
+
+#include
+#include
+#include
+
+namespace gtsam {
+
+typedef BayesTree GaussianBayesTree;
+
+/// optimize the BayesTree, starting from the root
+VectorValues optimize(const GaussianBayesTree& bayesTree);
+
+/// recursively optimize the whole Bayes tree, starting from the root, writing the solution into result
+void optimizeInPlace(const GaussianBayesTree& clique, VectorValues& result);
+
+namespace internal {
+void optimizeInPlace(const boost::shared_ptr >& clique, VectorValues& result);
+}
+
+/**
+ * Optimize along the gradient direction, with a closed-form computation to
+ * perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
+ *
+ * This function returns \f$ \delta x \f$ that minimizes a reparametrized
+ * problem. The error function of a GaussianBayesTree is
+ *
+ * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
+ *
+ * with gradient and Hessian
+ *
+ * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
+ *
+ * This function performs the line search in the direction of the
+ * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
+ * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
+ *
+ * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
+ *
+ * Optimizing by setting the derivative to zero yields
+ * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
+ * evaluates the denominator without computing the Hessian \f$ G \f$, returning
+ *
+ * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
+ */
+VectorValues optimizeGradientSearch(const GaussianBayesTree& bn);
+
+/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
+void optimizeGradientSearchInPlace(const GaussianBayesTree& bn, VectorValues& grad);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around \f$ x = x_0 \f$.
+ * The gradient is \f$ R^T(Rx-d) \f$.
+ * @param bayesTree The Gaussian Bayes Tree \f$(R,d)\f$
+ * @param x0 The center about which to compute the gradient
+ * @return The gradient as a VectorValues
+ */
+VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0);
+
+/**
+ * Compute the gradient of the energy function,
+ * \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
+ * centered around zero.
+ * The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesTree&, const VectorValues&).
+ * @param bayesTree The Gaussian Bayes Tree \f$(R,d)\f$
+ * @param [out] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
+ * @return The gradient as a VectorValues
+ */
+void gradientAtZero(const GaussianBayesTree& bayesTree, VectorValues& g);
+
+}
+
+#include
+
diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp
index 7bff355d7..233dc4b34 100644
--- a/gtsam/linear/GaussianConditional.cpp
+++ b/gtsam/linear/GaussianConditional.cpp
@@ -31,19 +31,19 @@ namespace gtsam {
// Helper function used only in this file - extracts vectors with variable indices
// in the first and last iterators, and concatenates them in that order into the
// output.
-template
-static Vector extractVectorValuesSlices(const VectorValues& values, ITERATOR first, ITERATOR last) {
+template
+static Vector extractVectorValuesSlices(const VALUES& values, ITERATOR first, ITERATOR last) {
// Find total dimensionality
int dim = 0;
for(ITERATOR j = first; j != last; ++j)
- dim += values.dim(*j);
+ dim += values[*j].rows();
// Copy vectors
Vector ret(dim);
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
- ret.segment(varStart, values.dim(*j)) = values[*j];
- varStart += values.dim(*j);
+ ret.segment(varStart, values[*j].rows()) = values[*j];
+ varStart += values[*j].rows();
}
return ret;
}
@@ -52,15 +52,15 @@ static Vector extractVectorValuesSlices(const VectorValues& values, ITERATOR fir
// Helper function used only in this file - writes to the variables in values
// with indices iterated over by first and last, interpreting vector as the
// concatenated vectors to write.
-template
-static void writeVectorValuesSlices(const VECTOR& vector, VectorValues& values, ITERATOR first, ITERATOR last) {
+template
+static void writeVectorValuesSlices(const VECTOR& vector, VALUES& values, ITERATOR first, ITERATOR last) {
// Copy vectors
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
- values[*j] = vector.segment(varStart, values.dim(*j));
- varStart += values.dim(*j);
+ values[*j] = vector.segment(varStart, values[*j].rows());
+ varStart += values[*j].rows();
}
- assert(varStart = vector.rows());
+ assert(varStart == vector.rows());
}
/* ************************************************************************* */
@@ -221,78 +221,34 @@ JacobianFactor::shared_ptr GaussianConditional::toFactor() const {
}
/* ************************************************************************* */
-void GaussianConditional::rhs(VectorValues& x) const {
- writeVectorValuesSlices(get_d(), x, beginFrontals(), endFrontals());
-}
+template<class VALUES>
+inline static void doSolveInPlace(const GaussianConditional& conditional, VALUES& x) {
-/* ************************************************************************* */
-void GaussianConditional::rhs(Permuted& x) const {
- // Copy the rhs into x, accounting for the permutation
- Vector d = get_d();
- size_t rhsPosition = 0; // We walk through the rhs by variable
- for(const_iterator j = beginFrontals(); j != endFrontals(); ++j) {
- // Get the segment of the rhs for this variable
- x[*j] = d.segment(rhsPosition, this->dim(j));
- // Increment the position
- rhsPosition += this->dim(j);
+ // Helper function to solve-in-place on a VectorValues or Permuted,
+ // called by GaussianConditional::solveInPlace(VectorValues&) and by
+ // GaussianConditional::solveInPlace(Permuted&).
+
+ static const bool debug = false;
+ if(debug) conditional.print("Solving conditional in place");
+ Vector xS = extractVectorValuesSlices(x, conditional.beginParents(), conditional.endParents());
+ xS = conditional.get_d() - conditional.get_S() * xS;
+ Vector soln = conditional.permutation().transpose() *
+ conditional.get_R().triangularView().solve(xS);
+ if(debug) {
+ gtsam::print(Matrix(conditional.get_R()), "Calling backSubstituteUpper on ");
+ gtsam::print(soln, "full back-substitution solution: ");
}
+ writeVectorValuesSlices(soln, x, conditional.beginFrontals(), conditional.endFrontals());
}
/* ************************************************************************* */
void GaussianConditional::solveInPlace(VectorValues& x) const {
- static const bool debug = false;
- if(debug) print("Solving conditional in place");
- Vector rhs = extractVectorValuesSlices(x, beginFrontals(), endFrontals());
- for (const_iterator parent = beginParents(); parent != endParents(); ++parent) {
- rhs += -get_S(parent) * x[*parent];
- }
- Vector soln = permutation_.transpose() * get_R().triangularView().solve(rhs);
- if(debug) {
- gtsam::print(Matrix(get_R()), "Calling backSubstituteUpper on ");
- gtsam::print(rhs, "rhs: ");
- gtsam::print(soln, "full back-substitution solution: ");
- }
- writeVectorValuesSlices(soln, x, beginFrontals(), endFrontals());
+ doSolveInPlace(*this, x); // Call helper version above
}
/* ************************************************************************* */
void GaussianConditional::solveInPlace(Permuted& x) const {
- static const bool debug = false;
- if(debug) print("Solving conditional in place (permuted)");
- // Extract RHS from values - inlined from VectorValues
- size_t s = 0;
- for (const_iterator it=beginFrontals(); it!=endFrontals(); ++it)
- s += x[*it].size();
- Vector rhs(s); size_t start = 0;
- for (const_iterator it=beginFrontals(); it!=endFrontals(); ++it) {
- SubVector v = x[*it];
- const size_t d = v.size();
- rhs.segment(start, d) = v;
- start += d;
- }
-
- // apply parents to rhs
- for (const_iterator parent = beginParents(); parent != endParents(); ++parent) {
- rhs += -get_S(parent) * x[*parent];
- }
-
- // solve system - backsubstitution
- Vector soln = permutation_.transpose() * get_R().triangularView().solve(rhs);
-
- // apply solution: inlined manually due to permutation
- size_t solnStart = 0;
- for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
- const size_t d = this->dim(frontal);
- x[*frontal] = soln.segment(solnStart, d);
- solnStart += d;
- }
-}
-
-/* ************************************************************************* */
-VectorValues GaussianConditional::solve(const VectorValues& x) const {
- VectorValues result = x;
- solveInPlace(result);
- return result;
+ doSolveInPlace(*this, x); // Call helper version above
}
/* ************************************************************************* */
diff --git a/gtsam/linear/GaussianConditional.h b/gtsam/linear/GaussianConditional.h
index f2a77c894..b33098061 100644
--- a/gtsam/linear/GaussianConditional.h
+++ b/gtsam/linear/GaussianConditional.h
@@ -43,7 +43,7 @@ class JacobianFactor;
/**
* A conditional Gaussian functions as the node in a Bayes network
* It has a set of parents y,z, etc. and implements a probability density on x.
- * The negative log-probability is given by \f$ |Rx - (d - Sy - Tz - ...)|^2 \f$
+ * The negative log-probability is given by \f$ \frac{1}{2} |Rx - (d - Sy - Tz - ...)|^2 \f$
*/
class GaussianConditional : public IndexConditional {
@@ -137,6 +137,15 @@ public:
/** Copy constructor */
GaussianConditional(const GaussianConditional& rhs);
+ /** Combine several GaussianConditional into a single dense GC. The
+ * conditionals enumerated by \c first and \c last must be in increasing
+ * order, meaning that the parents of any conditional may not include a
+ * conditional coming before it.
+ * @param firstConditional Iterator to the first conditional to combine, must dereference to a shared_ptr.
+ * @param lastConditional Iterator to after the last conditional to combine, must dereference to a shared_ptr. */
+ template<typename ITERATOR>
+ static shared_ptr Combine(ITERATOR firstConditional, ITERATOR lastConditional);
+
/** Assignment operator */
GaussianConditional& operator=(const GaussianConditional& rhs);
@@ -188,46 +197,39 @@ public:
*/
boost::shared_ptr toFactor() const;
- /**
- * Adds the RHS to a given VectorValues for use in solve() functions.
- * @param x is the values to be updated, assumed allocated
- */
- void rhs(VectorValues& x) const;
-
/**
- * Adds the RHS to a given VectorValues for use in solve() functions.
- * @param x is the values to be updated, assumed allocated
- */
- void rhs(Permuted& x) const;
-
- /**
- * solves a conditional Gaussian and stores the result in x
+ * Solves a conditional Gaussian and writes the solution into the entries of
+ * \c x for each frontal variable of the conditional. The parents are
+ * assumed to have already been solved in and their values are read from \c x.
* This function works for multiple frontal variables.
- * NOTE: assumes that the RHS for the frontals is stored in x, and
- * then replaces the RHS with the partial result for this conditional,
- * assuming that parents have been solved already.
*
- * @param x values structure with solved parents, and the RHS for this conditional
- * @return solution \f$ x = R \ (d - Sy - Tz - ...) \f$ for each frontal variable
+ * Given the Gaussian conditional with log likelihood \f$ |R x_f - (d - S x_s)|^2 \f$,
+ * where \f$ f \f$ are the frontal variables and \f$ s \f$ are the separator
+ * variables of this conditional, this solve function computes
+ * \f$ x_f = R^{-1} (d - S x_s) \f$ using back-substitution.
+ *
+ * @param x VectorValues structure with solved parents \f$ x_s \f$, and into which the
+ * solution \f$ x_f \f$ will be written.
*/
void solveInPlace(VectorValues& x) const;
/**
- * solves a conditional Gaussian and stores the result in x
- * Identical to solveInPlace() above, with a permuted x
+ * Solves a conditional Gaussian and writes the solution into the entries of
+ * \c x for each frontal variable of the conditional (version for permuted
+ * VectorValues). The parents are assumed to have already been solved in
+ * and their values are read from \c x. This function works for multiple
+ * frontal variables.
+ *
+ * Given the Gaussian conditional with log likelihood \f$ |R x_f - (d - S x_s)|^2 \f$,
+ * where \f$ f \f$ are the frontal variables and \f$ s \f$ are the separator
+ * variables of this conditional, this solve function computes
+ * \f$ x_f = R^{-1} (d - S x_s) \f$ using back-substitution.
+ *
+ * @param x VectorValues structure with solved parents \f$ x_s \f$, and into which the
+ * solution \f$ x_f \f$ will be written.
*/
void solveInPlace(Permuted& x) const;
- /**
- * Solves a conditional Gaussian and returns a new VectorValues
- * This function works for multiple frontal variables, but should
- * only be used for testing as it copies the input vector values
- *
- * Assumes, as in solveInPlace, that the RHS has been stored in x
- * for all frontal variables
- */
- VectorValues solve(const VectorValues& x) const;
-
// functions for transpose backsubstitution
/**
@@ -274,5 +276,49 @@ GaussianConditional::GaussianConditional(ITERATOR firstKey, ITERATOR lastKey,
}
/* ************************************************************************* */
+template<typename ITERATOR>
+GaussianConditional::shared_ptr GaussianConditional::Combine(ITERATOR firstConditional, ITERATOR lastConditional) {
+
+ // TODO: check for being a clique
+
+ // Get dimensions from first conditional
+ std::vector dims; dims.reserve((*firstConditional)->size() + 1);
+ for(const_iterator j = (*firstConditional)->begin(); j != (*firstConditional)->end(); ++j)
+ dims.push_back((*firstConditional)->dim(j));
+ dims.push_back(1);
+
+ // We assume the conditionals form clique, so the first n variables will be
+ // frontal variables in the new conditional.
+ size_t nFrontals = 0;
+ size_t nRows = 0;
+ for(ITERATOR c = firstConditional; c != lastConditional; ++c) {
+ nRows += dims[nFrontals];
+ ++ nFrontals;
+ }
+
+ // Allocate combined conditional, has same keys as firstConditional
+ Matrix tempCombined;
+ VerticalBlockView tempBlockView(tempCombined, dims.begin(), dims.end(), 0);
+ GaussianConditional::shared_ptr combinedConditional(new GaussianConditional((*firstConditional)->begin(), (*firstConditional)->end(), nFrontals, tempBlockView, zero(nRows)));
+
+ // Resize to correct number of rows
+ combinedConditional->matrix_.resize(nRows, combinedConditional->matrix_.cols());
+ combinedConditional->rsd_.rowEnd() = combinedConditional->matrix_.rows();
+
+ // Copy matrix and sigmas
+ const size_t totalDims = combinedConditional->matrix_.cols();
+ size_t currentSlot = 0;
+ for(ITERATOR c = firstConditional; c != lastConditional; ++c) {
+ const size_t startRow = combinedConditional->rsd_.offset(currentSlot); // Start row is same as start column
+ combinedConditional->rsd_.range(0, currentSlot).block(startRow, 0, dims[currentSlot], combinedConditional->rsd_.offset(currentSlot)).operator=(
+ Matrix::Zero(dims[currentSlot], combinedConditional->rsd_.offset(currentSlot)));
+ combinedConditional->rsd_.range(currentSlot, dims.size()).block(startRow, 0, dims[currentSlot], totalDims - startRow).operator=(
+ (*c)->matrix_);
+ combinedConditional->sigmas_.segment(startRow, dims[currentSlot]) = (*c)->sigmas_;
+ ++ currentSlot;
+ }
+
+ return combinedConditional;
+}
} // gtsam
diff --git a/gtsam/linear/GaussianDensity.cpp b/gtsam/linear/GaussianDensity.cpp
index 72e3bb325..5300d594f 100644
--- a/gtsam/linear/GaussianDensity.cpp
+++ b/gtsam/linear/GaussianDensity.cpp
@@ -42,7 +42,6 @@ namespace gtsam {
Index k = firstFrontalKey();
// a VectorValues that only has a value for k: cannot be printed if k<>0
x.insert(k, Vector(sigmas_.size()));
- rhs(x);
solveInPlace(x);
return x[k];
}
diff --git a/gtsam/linear/GaussianISAM.cpp b/gtsam/linear/GaussianISAM.cpp
index a9aa2479c..4402d8b05 100644
--- a/gtsam/linear/GaussianISAM.cpp
+++ b/gtsam/linear/GaussianISAM.cpp
@@ -17,6 +17,7 @@
#include
#include
+#include
#include
@@ -50,50 +51,14 @@ Matrix GaussianISAM::marginalCovariance(Index j) const {
return Super::jointBayesNet(key1, key2, &EliminateQR);
}
-/* ************************************************************************* */
-void optimize(const BayesTree::sharedClique& clique, VectorValues& result) {
- // parents are assumed to already be solved and available in result
- // RHS for current conditional should already be in place in result
- clique->conditional()->solveInPlace(result);
-
- BOOST_FOREACH(const BayesTree::sharedClique& child, clique->children_)
- optimize(child, result);
-}
-
-/* ************************************************************************* */
-void treeRHS(const BayesTree::sharedClique& clique, VectorValues& result) {
- clique->conditional()->rhs(result);
- BOOST_FOREACH(const BayesTree::sharedClique& child, clique->children_)
- treeRHS(child, result);
-}
-
-/* ************************************************************************* */
-VectorValues rhs(const BayesTree& bayesTree, boost::optional dims) {
- VectorValues result;
- if(dims)
- result = VectorValues(*dims);
- else
- result = *allocateVectorValues(bayesTree); // allocate
- treeRHS(bayesTree.root(), result); // recursively fill
- return result;
-}
-
/* ************************************************************************* */
VectorValues optimize(const GaussianISAM& isam) {
- VectorValues result = rhs(isam, isam.dims_);
- // starting from the root, call optimize on each conditional
- optimize(isam.root(), result);
+ VectorValues result(isam.dims_);
+ // Call optimize for BayesTree
+ optimizeInPlace((const BayesTree&)isam, result);
return result;
}
-/* ************************************************************************* */
-VectorValues optimize(const BayesTree& bayesTree) {
- VectorValues result = rhs(bayesTree);
- // starting from the root, call optimize on each conditional
- optimize(bayesTree.root(), result);
- return result;
-}
-
/* ************************************************************************* */
BayesNet GaussianISAM::shortcut(sharedClique clique, sharedClique root) {
return clique->shortcut(root,&EliminateQR);
diff --git a/gtsam/linear/GaussianISAM.h b/gtsam/linear/GaussianISAM.h
index a06c50657..af96d5317 100644
--- a/gtsam/linear/GaussianISAM.h
+++ b/gtsam/linear/GaussianISAM.h
@@ -90,19 +90,7 @@ public:
}; // \class GaussianISAM
-/** load a VectorValues with the RHS of the system for backsubstitution */
-VectorValues rhs(const BayesTree& bayesTree, boost::optional dims = boost::none);
-
-/** recursively load RHS for system */
-void treeRHS(const BayesTree::sharedClique& clique, VectorValues& result);
-
-// recursively optimize this conditional and all subtrees
-void optimize(const BayesTree::sharedClique& clique, VectorValues& result);
-
// optimize the BayesTree, starting from the root
VectorValues optimize(const GaussianISAM& isam);
-// optimize the BayesTree, starting from the root
-VectorValues optimize(const BayesTree& bayesTree);
-
} // \namespace gtsam
diff --git a/gtsam/linear/GaussianJunctionTree.cpp b/gtsam/linear/GaussianJunctionTree.cpp
index e2e1c2951..2c2a44dbb 100644
--- a/gtsam/linear/GaussianJunctionTree.cpp
+++ b/gtsam/linear/GaussianJunctionTree.cpp
@@ -20,6 +20,7 @@
#include
#include
#include
+#include
#include
@@ -33,32 +34,6 @@ namespace gtsam {
using namespace std;
- /* ************************************************************************* */
- void GaussianJunctionTree::btreeBackSubstitute(const BTClique::shared_ptr& current, VectorValues& config) const {
- // solve the bayes net in the current node
- current->conditional()->solveInPlace(config);
-
- // GaussianBayesNet::const_reverse_iterator it = current->rbegin();
-// for (; it!=current->rend(); ++it) {
-// (*it)->solveInPlace(config); // solve and store result
-//
-//// Vector x = (*it)->solve(config); // Solve for that variable
-//// config[(*it)->key()] = x; // store result in partial solution
-// }
-
- // solve the bayes nets in the child nodes
- BOOST_FOREACH(const BTClique::shared_ptr& child, current->children()) {
- btreeBackSubstitute(child, config);
- }
- }
-
- /* ************************************************************************* */
- void GaussianJunctionTree::btreeRHS(const BTClique::shared_ptr& current, VectorValues& config) const {
- current->conditional()->rhs(config);
- BOOST_FOREACH(const BTClique::shared_ptr& child, current->children())
- btreeRHS(child, config);
- }
-
/* ************************************************************************* */
VectorValues GaussianJunctionTree::optimize(Eliminate function) const {
tic(1, "GJT eliminate");
@@ -71,12 +46,11 @@ namespace gtsam {
vector dims(rootClique->conditional()->back()+1, 0);
countDims(rootClique, dims);
VectorValues result(dims);
- btreeRHS(rootClique, result);
toc(2, "allocate VectorValues");
// back-substitution
tic(3, "back-substitute");
- btreeBackSubstitute(rootClique, result);
+ internal::optimizeInPlace(rootClique, result);
toc(3, "back-substitute");
return result;
}
diff --git a/gtsam/linear/GaussianJunctionTree.h b/gtsam/linear/GaussianJunctionTree.h
index be98669f1..31a0a1680 100644
--- a/gtsam/linear/GaussianJunctionTree.h
+++ b/gtsam/linear/GaussianJunctionTree.h
@@ -45,13 +45,6 @@ namespace gtsam {
typedef Base::sharedClique sharedClique;
typedef GaussianFactorGraph::Eliminate Eliminate;
- protected:
- // back-substitute in topological sort order (parents first)
- void btreeBackSubstitute(const BTClique::shared_ptr& current, VectorValues& config) const;
-
- // find the RHS for the system in order to perform backsubstitution
- void btreeRHS(const BTClique::shared_ptr& current, VectorValues& config) const;
-
public :
/** Default constructor */
diff --git a/gtsam/linear/GaussianSequentialSolver.cpp b/gtsam/linear/GaussianSequentialSolver.cpp
index bfff98e0f..42bce03ce 100644
--- a/gtsam/linear/GaussianSequentialSolver.cpp
+++ b/gtsam/linear/GaussianSequentialSolver.cpp
@@ -78,7 +78,8 @@ VectorValues::shared_ptr GaussianSequentialSolver::optimize() const {
tic(2,"optimize");
// Back-substitute
- VectorValues::shared_ptr solution(gtsam::optimize_(*bayesNet));
+ VectorValues::shared_ptr solution(
+ new VectorValues(gtsam::optimize(*bayesNet)));
toc(2,"optimize");
if(debug) solution->print("GaussianSequentialSolver, solution ");
diff --git a/gtsam/linear/HessianFactor.h b/gtsam/linear/HessianFactor.h
index 2a16c0e38..c1e5f016a 100644
--- a/gtsam/linear/HessianFactor.h
+++ b/gtsam/linear/HessianFactor.h
@@ -71,7 +71,7 @@ namespace gtsam {
* and the information vector @f$ \eta = P^{-1} \mu = \Lambda \mu @f$
* to arrive at the canonical form of the Gaussian:
* @f[
- * E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu + C
+ * E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu
* @f]
*
* This factor is one of the factors that can be in a GaussianFactorGraph.
diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp
index 0218ecc8d..dbc2a5cff 100644
--- a/gtsam/linear/JacobianFactor.cpp
+++ b/gtsam/linear/JacobianFactor.cpp
@@ -642,8 +642,9 @@ namespace gtsam {
r.vector() = Vector::Zero(r.dim());
Index i = 0;
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, fg) {
+ SubVector &y = r[i];
for(JacobianFactor::const_iterator j = factor->begin(); j != factor->end(); ++j) {
- r[i] += factor->getA(j) * x[*j];
+ y += factor->getA(j) * x[*j];
}
++i;
}
diff --git a/gtsam/linear/iterative-inl.h b/gtsam/linear/iterative-inl.h
index 59fd824bf..02048d3df 100644
--- a/gtsam/linear/iterative-inl.h
+++ b/gtsam/linear/iterative-inl.h
@@ -82,7 +82,7 @@ namespace gtsam {
/* ************************************************************************* */
// take a step, return true if converged
bool step(const S& Ab, V& x) {
- if ((++k) >= parameters_.maxIterations()) return true;
+ if ((++k) >= ((int)parameters_.maxIterations())) return true;
//---------------------------------->
double alpha = takeOptimalStep(x);
diff --git a/gtsam/linear/tests/testGaussianConditional.cpp b/gtsam/linear/tests/testGaussianConditional.cpp
index 9bac18da0..b19a1fda5 100644
--- a/gtsam/linear/tests/testGaussianConditional.cpp
+++ b/gtsam/linear/tests/testGaussianConditional.cpp
@@ -144,48 +144,6 @@ TEST( GaussianConditional, equals )
EXPECT( expected.equals(actual) );
}
-/* ************************************************************************* */
-TEST( GaussianConditional, rhs_permuted )
-{
- // Test filling the rhs when the VectorValues is permuted
-
- // Create a VectorValues
- VectorValues unpermuted(5, 2);
- unpermuted[0] << 1, 2;
- unpermuted[1] << 3, 4;
- unpermuted[2] << 5, 6;
- unpermuted[3] << 7, 8;
- unpermuted[4] << 9, 10;
-
- // Create a permutation
- Permutation permutation(5);
- permutation[0] = 4;
- permutation[1] = 3;
- permutation[2] = 2;
- permutation[3] = 1;
- permutation[4] = 0;
-
- // Permuted VectorValues
- Permuted permuted(permutation, unpermuted);
-
- // Expected VectorValues
- VectorValues expected(5, 2);
- expected[0] << 1, 2;
- expected[1] << 3, 4;
- expected[2] << 5, 6;
- expected[3] << 7, 8;
- expected[4] << 11, 12;
-
- // GaussianConditional
- Vector d(2); d << 11, 12;
- GaussianConditional conditional(0, d, Matrix::Identity(2,2), Vector::Ones(2));
-
- // Fill rhs, conditional is on index 0, which should fill slot 4 of the values
- conditional.rhs(permuted);
-
- EXPECT(assert_equal(expected, unpermuted));
-}
-
/* ************************************************************************* */
TEST( GaussianConditional, solve )
{
@@ -208,8 +166,7 @@ TEST( GaussianConditional, solve )
Vector tau = ones(2);
- // RHS is different than the one in the solution vector
- GaussianConditional cg(_x_,ones(2), R, _x1_, A1, _l1_, A2, tau);
+ GaussianConditional cg(_x_, d, R, _x1_, A1, _l1_, A2, tau);
Vector sx1(2);
sx1(0) = 1.0; sx1(1) = 1.0;
@@ -218,21 +175,16 @@ TEST( GaussianConditional, solve )
sl1(0) = 1.0; sl1(1) = 1.0;
VectorValues solution(vector(3, 2));
- solution[_x_] = d; // RHS
+ solution[_x_] = d;
solution[_x1_] = sx1; // parents
solution[_l1_] = sl1;
- // NOTE: the solve functions assume the RHS is passed as the initialization of
- // the solution.
VectorValues expected(vector(3, 2));
expected[_x_] = expectedX;
expected[_x1_] = sx1;
expected[_l1_] = sl1;
-
- VectorValues copy_result = cg.solve(solution);
cg.solveInPlace(solution);
- EXPECT(assert_equal(expected, copy_result, 0.0001));
EXPECT(assert_equal(expected, solution, 0.0001));
}
@@ -240,12 +192,11 @@ TEST( GaussianConditional, solve )
TEST( GaussianConditional, solve_simple )
{
// no pivoting from LDL, so R matrix is not permuted
- // RHS is deliberately not the same as below
Matrix full_matrix = Matrix_(4, 7,
- 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.0,
- 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.0,
- 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.0,
- 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.0);
+ 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
+ 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
+ 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
+ 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);
// solve system as a non-multifrontal version first
// 2 variables, frontal has dim=4
@@ -261,7 +212,6 @@ TEST( GaussianConditional, solve_simple )
// elimination order; _x_, _x1_
vector vdim; vdim += 4, 2;
VectorValues actual(vdim);
- actual[_x_] = Vector_(4, 0.1, 0.2, 0.3, 0.4); // d
actual[_x1_] = sx1; // parent
VectorValues expected(vdim);
@@ -283,10 +233,10 @@ TEST( GaussianConditional, solve_multifrontal )
// create full system, 3 variables, 2 frontals, all 2 dim
// no pivoting from LDL, so R matrix is not permuted
Matrix full_matrix = Matrix_(4, 7,
- 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.5,
- 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.6,
- 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.7,
- 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.8);
+ 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
+ 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
+ 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
+ 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);
// 3 variables, all dim=2
vector dims; dims += 2, 2, 2, 1;
@@ -295,15 +245,13 @@ TEST( GaussianConditional, solve_multifrontal )
vector cgdims; cgdims += _x_, _x1_, _l1_;
GaussianConditional cg(cgdims.begin(), cgdims.end(), 2, matrices, sigmas);
- EXPECT(assert_equal(Vector_(4, 0.5, 0.6, 0.7, 0.8), cg.get_d()));
+ EXPECT(assert_equal(Vector_(4, 0.1, 0.2, 0.3, 0.4), cg.get_d()));
// partial solution
Vector sl1 = Vector_(2, 9.0, 10.0);
// elimination order; _x_, _x1_, _l1_
VectorValues actual(vector(3, 2));
- actual[_x_] = Vector_(2, 0.1, 0.2); // rhs
- actual[_x1_] = Vector_(2, 0.3, 0.4); // rhs
actual[_l1_] = sl1; // parent
VectorValues expected(vector(3, 2));
@@ -327,10 +275,10 @@ TEST( GaussianConditional, solve_multifrontal_permuted )
// create full system, 3 variables, 2 frontals, all 2 dim
// no pivoting from LDL, so R matrix is not permuted
Matrix full_matrix = Matrix_(4, 7,
- 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.5,
- 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.6,
- 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.7,
- 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.8);
+ 1.0, 0.0, 2.0, 0.0, 3.0, 0.0, 0.1,
+ 0.0, 1.0, 0.0, 2.0, 0.0, 3.0, 0.2,
+ 0.0, 0.0, 3.0, 0.0, 4.0, 0.0, 0.3,
+ 0.0, 0.0, 0.0, 3.0, 0.0, 4.0, 0.4);
// 3 variables, all dim=2
vector dims; dims += 2, 2, 2, 1;
@@ -339,7 +287,7 @@ TEST( GaussianConditional, solve_multifrontal_permuted )
vector cgdims; cgdims += _x_, _x1_, _l1_;
GaussianConditional cg(cgdims.begin(), cgdims.end(), 2, matrices, sigmas);
- EXPECT(assert_equal(Vector_(4, 0.5, 0.6, 0.7, 0.8), cg.get_d()));
+ EXPECT(assert_equal(Vector_(4, 0.1, 0.2, 0.3, 0.4), cg.get_d()));
// partial solution
Vector sl1 = Vector_(2, 9.0, 10.0);
diff --git a/gtsam/linear/tests/testGaussianJunctionTree.cpp b/gtsam/linear/tests/testGaussianJunctionTree.cpp
index 2462f5255..3a9bcb4ad 100644
--- a/gtsam/linear/tests/testGaussianJunctionTree.cpp
+++ b/gtsam/linear/tests/testGaussianJunctionTree.cpp
@@ -103,6 +103,21 @@ TEST( GaussianJunctionTree, eliminate )
EXPECT(assert_equal(*(bayesTree_expected.root()->children().front()), *(rootClique->children().front())));
}
+/* ************************************************************************* */
+TEST_UNSAFE( GaussianJunctionTree, GBNConstructor )
+{
+ GaussianFactorGraph fg = createChain();
+ GaussianJunctionTree jt(fg);
+ BayesTree::sharedClique root = jt.eliminate(&EliminateQR);
+ BayesTree expected;
+ expected.insert(root);
+
+ GaussianBayesNet bn(*GaussianSequentialSolver(fg).eliminate());
+ BayesTree actual(bn);
+
+ EXPECT(assert_equal(expected, actual));
+}
+
/* ************************************************************************* */
TEST( GaussianJunctionTree, optimizeMultiFrontal )
{
diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.h b/gtsam/nonlinear/DoglegOptimizerImpl.h
index af1e345b6..5feaa401a 100644
--- a/gtsam/nonlinear/DoglegOptimizerImpl.h
+++ b/gtsam/nonlinear/DoglegOptimizerImpl.h
@@ -115,29 +115,6 @@ struct DoglegOptimizerImpl {
*/
static VectorValues ComputeDoglegPoint(double Delta, const VectorValues& dx_u, const VectorValues& dx_n, const bool verbose=false);
- /** Compute the minimizer \f$ \delta x_u \f$ of the line search along the gradient direction \f$ g \f$ of
- * the function
- * \f[
- * M(\delta x) = (R \delta x - d)^T (R \delta x - d)
- * \f]
- * where \f$ R \f$ is an upper-triangular matrix and \f$ d \f$ is a vector.
- * Together \f$ (R,d) \f$ are either a Bayes' net or a Bayes' tree.
- *
- * The same quadratic error function written as a Taylor expansion of the original
- * non-linear error function is
- * \f[
- * M(\delta x) = f(x_0) + g(x_0) + \frac{1}{2} \delta x^T G(x_0) \delta x,
- * \f]
- * @tparam M The type of the Bayes' net or tree, currently
- * either BayesNet (or GaussianBayesNet) or BayesTree.
- * @param Rd The Bayes' net or tree \f$ (R,d) \f$ as described above, currently
- * this must be of type BayesNet (or GaussianBayesNet) or
- * BayesTree.
- * @return The minimizer \f$ \delta x_u \f$ along the gradient descent direction.
- */
- template
- static VectorValues ComputeSteepestDescentPoint(const M& Rd);
-
/** Compute the point on the line between the steepest descent point and the
* Newton's method point intersecting the trust region boundary.
* Mathematically, computes \f$ \tau \f$ such that \f$ 0<\tau<1 \f$ and
@@ -159,7 +136,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
// Compute steepest descent and Newton's method points
tic(0, "Steepest Descent");
- VectorValues dx_u = ComputeSteepestDescentPoint(Rd);
+ VectorValues dx_u = optimizeGradientSearch(Rd);
toc(0, "Steepest Descent");
tic(1, "optimize");
VectorValues dx_n = optimize(Rd);
@@ -173,7 +150,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
IterationResult result;
bool stay = true;
- enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction; // Used to prevent alternating between increasing and decreasing in one iteration
+ enum { NONE, INCREASED_DELTA, DECREASED_DELTA } lastAction = NONE; // Used to prevent alternating between increasing and decreasing in one iteration
while(stay) {
tic(3, "Dog leg point");
// Compute dog leg point
@@ -274,33 +251,4 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
return result;
}
-/* ************************************************************************* */
-template
-VectorValues DoglegOptimizerImpl::ComputeSteepestDescentPoint(const M& Rd) {
-
- tic(0, "Compute Gradient");
- // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
- VectorValues grad = *allocateVectorValues(Rd);
- gradientAtZero(Rd, grad);
- double gradientSqNorm = grad.dot(grad);
- toc(0, "Compute Gradient");
-
- tic(1, "Compute R*g");
- // Compute R * g
- FactorGraph Rd_jfg(Rd);
- Errors Rg = Rd_jfg * grad;
- toc(1, "Compute R*g");
-
- tic(2, "Compute minimizing step size");
- // Compute minimizing step size
- double step = -gradientSqNorm / dot(Rg, Rg);
- toc(2, "Compute minimizing step size");
-
- tic(3, "Compute point");
- // Compute steepest descent point
- scal(step, grad);
- toc(3, "Compute point");
- return grad;
-}
-
}
diff --git a/gtsam/nonlinear/GaussianISAM2-inl.h b/gtsam/nonlinear/GaussianISAM2-inl.h
index de3992d28..57801fbe4 100644
--- a/gtsam/nonlinear/GaussianISAM2-inl.h
+++ b/gtsam/nonlinear/GaussianISAM2-inl.h
@@ -18,16 +18,16 @@
#include
#include
-using namespace std;
-using namespace gtsam;
-
#include
namespace gtsam {
+ using namespace std;
+
/* ************************************************************************* */
+ namespace internal {
template
- void optimize2(const boost::shared_ptr& clique, double threshold,
+ void optimizeWildfire(const boost::shared_ptr& clique, double threshold,
vector& changed, const vector& replaced, Permuted& delta, int& count) {
// if none of the variables in this clique (frontal and separator!) changed
// significantly, then by the running intersection property, none of the
@@ -64,7 +64,6 @@ namespace gtsam {
}
// Back-substitute
- (*clique)->rhs(delta);
(*clique)->solveInPlace(delta);
count += (*clique)->nrFrontals();
@@ -98,45 +97,61 @@ namespace gtsam {
// Recurse to children
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
- optimize2(child, threshold, changed, replaced, delta, count);
+ optimizeWildfire(child, threshold, changed, replaced, delta, count);
}
}
}
-
- /* ************************************************************************* */
- // fast full version without threshold
- template
- void optimize2(const boost::shared_ptr& clique, VectorValues& delta) {
-
- // parents are assumed to already be solved and available in result
- (*clique)->rhs(delta);
- (*clique)->solveInPlace(delta);
-
- // Solve chilren recursively
- BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
- optimize2(child, delta);
- }
}
- ///* ************************************************************************* */
- //boost::shared_ptr optimize2(const GaussianISAM2::sharedClique& root) {
- // boost::shared_ptr delta(new VectorValues());
- // set changed;
- // // starting from the root, call optimize on each conditional
- // optimize2(root, delta);
- // return delta;
- //}
-
/* ************************************************************************* */
template
- int optimize2(const boost::shared_ptr& root, double threshold, const vector& keys, Permuted& delta) {
+ int optimizeWildfire(const boost::shared_ptr& root, double threshold, const vector& keys, Permuted& delta) {
vector changed(keys.size(), false);
int count = 0;
// starting from the root, call optimize on each conditional
- optimize2(root, threshold, changed, keys, delta, count);
+ if(root)
+ internal::optimizeWildfire(root, threshold, changed, keys, delta, count);
return count;
}
+ /* ************************************************************************* */
+ template<class CONDITIONAL, class GRAPH>
+ VectorValues optimizeGradientSearch(const ISAM2<CONDITIONAL, GRAPH>& isam) {
+ tic(0, "Allocate VectorValues");
+ VectorValues grad = *allocateVectorValues(isam);
+ toc(0, "Allocate VectorValues");
+
+ optimizeGradientSearchInPlace(isam, grad);
+
+ return grad;
+ }
+
+ /* ************************************************************************* */
+ template<class CONDITIONAL, class GRAPH>
+ void optimizeGradientSearchInPlace(const ISAM2<CONDITIONAL, GRAPH>& Rd, VectorValues& grad) {
+ tic(1, "Compute Gradient");
+ // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
+ gradientAtZero(Rd, grad);
+ double gradientSqNorm = grad.dot(grad);
+ toc(1, "Compute Gradient");
+
+ tic(2, "Compute R*g");
+ // Compute R * g
+ FactorGraph<JacobianFactor> Rd_jfg(Rd);
+ Errors Rg = Rd_jfg * grad;
+ toc(2, "Compute R*g");
+
+ tic(3, "Compute minimizing step size");
+ // Compute minimizing step size
+ double step = -gradientSqNorm / dot(Rg, Rg);
+ toc(3, "Compute minimizing step size");
+
+ tic(4, "Compute point");
+ // Compute steepest descent point
+ scal(step, grad);
+ toc(4, "Compute point");
+ }
+
/* ************************************************************************* */
template<class CLIQUE>
void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
diff --git a/gtsam/nonlinear/GaussianISAM2.cpp b/gtsam/nonlinear/GaussianISAM2.cpp
index 34456ad36..624c5d4f0 100644
--- a/gtsam/nonlinear/GaussianISAM2.cpp
+++ b/gtsam/nonlinear/GaussianISAM2.cpp
@@ -26,16 +26,6 @@ using namespace gtsam;
namespace gtsam {
- /* ************************************************************************* */
- VectorValues gradient(const BayesTree<GaussianConditional>& bayesTree, const VectorValues& x0) {
- return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
- }
-
- /* ************************************************************************* */
- void gradientAtZero(const BayesTree<GaussianConditional>& bayesTree, VectorValues& g) {
- gradientAtZero(FactorGraph<JacobianFactor>(bayesTree), g);
- }
-
/* ************************************************************************* */
VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
diff --git a/gtsam/nonlinear/GaussianISAM2.h b/gtsam/nonlinear/GaussianISAM2.h
index d1a4bef9f..a602d81a6 100644
--- a/gtsam/nonlinear/GaussianISAM2.h
+++ b/gtsam/nonlinear/GaussianISAM2.h
@@ -63,48 +63,65 @@ public:
};
-/** optimize the BayesTree, starting from the root */
-template<class CLIQUE>
-void optimize2(const boost::shared_ptr<CLIQUE>& root, VectorValues& delta);
+/** Get the linear delta for the ISAM2 object, unpermuted the delta returned by ISAM2::getDelta() */
+template<class CONDITIONAL, class GRAPH>
+VectorValues optimize(const ISAM2<CONDITIONAL, GRAPH>& isam) {
+ VectorValues delta = *allocateVectorValues(isam);
+ internal::optimizeInPlace(isam.root(), delta);
+ return delta;
+}
-/// optimize the BayesTree, starting from the root; "replaced" needs to contain
+/// Optimize the BayesTree, starting from the root.
+/// @param replaced Needs to contain
/// all variables that are contained in the top of the Bayes tree that has been
-/// redone; "delta" is the current solution, an offset from the linearization
-/// point; "threshold" is the maximum change against the PREVIOUS delta for
+/// redone.
+/// @param delta The current solution, an offset from the linearization
+/// point.
+/// @param threshold The maximum change against the PREVIOUS delta for
/// non-replaced variables that can be ignored, ie. the old delta entry is kept
/// and recursive backsubstitution might eventually stop if none of the changed
/// variables are contained in the subtree.
-/// returns the number of variables that were solved for
+/// @return The number of variables that were solved for
template<class CLIQUE>
-int optimize2(const boost::shared_ptr<CLIQUE>& root,
+int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root,
double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
+/**
+ * Optimize along the gradient direction, with a closed-form computation to
+ * perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
+ *
+ * This function returns \f$ \delta x \f$ that minimizes a reparametrized
+ * problem. The error function of a GaussianBayesNet is
+ *
+ * \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
+ *
+ * with gradient and Hessian
+ *
+ * \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
+ *
+ * This function performs the line search in the direction of the
+ * gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
+ * \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
+ *
+ * \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
+ *
+ * Optimizing by setting the derivative to zero yields
+ * \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
+ * evaluates the denominator without computing the Hessian \f$ G \f$, returning
+ *
+ * \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
+ */
+template<class CONDITIONAL, class GRAPH>
+VectorValues optimizeGradientSearch(const ISAM2<CONDITIONAL, GRAPH>& isam);
+
+/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
+template<class CONDITIONAL, class GRAPH>