Merge remote-tracking branch 'origin/develop' into feature/rotation_group

# Conflicts:
#	gtsam.h
#	gtsam/linear/GaussianBayesNet.cpp
release/4.3a0
Frank Dellaert 2019-11-27 14:18:39 -08:00
commit e971c933d2
70 changed files with 4485 additions and 910 deletions


@ -10,7 +10,7 @@ endif()
# Set the version number for the library
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 0)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_VERSION_PATCH 2)
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")
@ -171,39 +171,22 @@ if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILE
message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
endif()
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
# JLBC: This was once updated to target-based names (Boost::xxx), but it caused
# problems with Boost versions newer than FindBoost.cmake was prepared to handle,
# so we downgraded this to classic filenames-based variables, and manually adding
# the target_include_directories(xxx ${Boost_INCLUDE_DIR})
# Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
set(GTSAM_BOOST_LIBRARIES
optimized ${Boost_SERIALIZATION_LIBRARY_RELEASE}
optimized ${Boost_SYSTEM_LIBRARY_RELEASE}
optimized ${Boost_FILESYSTEM_LIBRARY_RELEASE}
optimized ${Boost_THREAD_LIBRARY_RELEASE}
optimized ${Boost_DATE_TIME_LIBRARY_RELEASE}
optimized ${Boost_REGEX_LIBRARY_RELEASE}
debug ${Boost_SERIALIZATION_LIBRARY_DEBUG}
debug ${Boost_SYSTEM_LIBRARY_DEBUG}
debug ${Boost_FILESYSTEM_LIBRARY_DEBUG}
debug ${Boost_THREAD_LIBRARY_DEBUG}
debug ${Boost_DATE_TIME_LIBRARY_DEBUG}
debug ${Boost_REGEX_LIBRARY_DEBUG}
Boost::serialization
Boost::system
Boost::filesystem
Boost::thread
Boost::date_time
Boost::regex
)
message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}")
if (GTSAM_DISABLE_NEW_TIMERS)
message("WARNING: GTSAM timing instrumentation manually disabled")
list_append_cache(GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
else()
if(Boost_TIMER_LIBRARY)
list(APPEND GTSAM_BOOST_LIBRARIES
optimized ${Boost_TIMER_LIBRARY_RELEASE}
optimized ${Boost_CHRONO_LIBRARY_RELEASE}
debug ${Boost_TIMER_LIBRARY_DEBUG}
debug ${Boost_CHRONO_LIBRARY_DEBUG}
)
list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
else()
list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")


@ -6,7 +6,7 @@ file(GLOB cppunitlite_src "*.cpp")
add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h
target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h
gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure

cmake/FindBoost.cmake (new file, 2347 lines)

File diff suppressed because it is too large


@ -94,7 +94,8 @@ if(MSVC)
set(GTSAM_COMPILE_OPTIONS_PRIVATE_TIMING /MD /O2 CACHE STRING "(User editable) Private compiler flags for Timing configuration.")
else()
# Common to all configurations, next for each configuration:
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall CACHE STRING "(User editable) Private compiler flags for all configurations.")
# "-fPIC" is to ensure proper code generation for shared libraries
set(GTSAM_COMPILE_OPTIONS_PRIVATE_COMMON -Wall -fPIC CACHE STRING "(User editable) Private compiler flags for all configurations.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_DEBUG -g -fno-inline CACHE STRING "(User editable) Private compiler flags for Debug configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELWITHDEBINFO -g -O3 CACHE STRING "(User editable) Private compiler flags for RelWithDebInfo configuration.")
set(GTSAM_COMPILE_OPTIONS_PRIVATE_RELEASE -O3 CACHE STRING "(User editable) Private compiler flags for Release configuration.")


@ -20,13 +20,37 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
set(EXTRA_FILE "_does_not_exist_")
endif()
# GTSAM defines its version variable as GTSAM_VERSION_STRING while other
# projects may define it as <ProjectName>_VERSION. Since this file is
# installed on the system as part of GTSAMCMakeTools and may be useful in
# external projects, we need to handle both cases of version variable naming
# here.
if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
endif()
# Version file
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${PROJECT_BINARY_DIR}/${PACKAGE_NAME}ConfigVersion.cmake"
VERSION ${${PACKAGE_NAME}_VERSION}
COMPATIBILITY SameMajorVersion
)
# Config file
file(RELATIVE_PATH CONF_REL_INCLUDE_DIR "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}" "${CMAKE_INSTALL_PREFIX}/include")
file(RELATIVE_PATH CONF_REL_LIB_DIR "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}" "${CMAKE_INSTALL_PREFIX}/lib")
configure_file(${GTSAM_CONFIG_TEMPLATE_PATH}/Config.cmake.in "${PROJECT_BINARY_DIR}/${PACKAGE_NAME}Config.cmake" @ONLY)
message(STATUS "Wrote ${PROJECT_BINARY_DIR}/${PACKAGE_NAME}Config.cmake")
# Install config and exports files (for find scripts)
install(FILES "${PROJECT_BINARY_DIR}/${PACKAGE_NAME}Config.cmake" DESTINATION "${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}")
# Install config, version and exports files (for find scripts)
install(
FILES
"${PROJECT_BINARY_DIR}/${PACKAGE_NAME}Config.cmake"
"${PROJECT_BINARY_DIR}/${PACKAGE_NAME}ConfigVersion.cmake"
DESTINATION
"${CMAKE_INSTALL_PREFIX}/${DEF_INSTALL_CMAKE_DIR}"
)
install(EXPORT ${PACKAGE_NAME}-exports DESTINATION ${DEF_INSTALL_CMAKE_DIR})
endfunction()


@ -1,88 +0,0 @@
# This is FindGTSAM.cmake
# DEPRECATED: Use config file approach to pull in targets from gtsam
# CMake module to locate the GTSAM package
#
# The following cache variables may be set before calling this script:
#
# GTSAM_DIR (or GTSAM_ROOT): (Optional) The install prefix OR source tree of gtsam (e.g. /usr/local or src/gtsam)
# GTSAM_BUILD_NAME: (Optional) If compiling against a source tree, the name of the build directory
# within it (e.g. build-debug). Without this defined, this script tries to
# intelligently find the build directory based on the project's build directory name
# or based on the build type (Debug/Release/etc).
#
# The following variables will be defined:
#
# GTSAM_FOUND : TRUE if the package has been successfully found
# GTSAM_INCLUDE_DIR : paths to GTSAM's INCLUDE directories
# GTSAM_LIBS : paths to GTSAM's libraries
#
# NOTES on compiling against an uninstalled GTSAM build tree:
# - A GTSAM source tree will be automatically searched for in the directory
# 'gtsam' next to your project directory, after searching
# CMAKE_INSTALL_PREFIX and $HOME, but before searching /usr/local and /usr.
# - The build directory will be searched first with the same name as your
# project's build directory, e.g. if you build from 'MyProject/build-optimized',
# 'gtsam/build-optimized' will be searched first. Next, a build directory for
# your project's build type, e.g. if CMAKE_BUILD_TYPE in your project is
# 'Release', then 'gtsam/build-release' will be searched next. Finally, plain
# 'gtsam/build' will be searched.
# - You can control the gtsam build directory name directly by defining the CMake
# cache variable 'GTSAM_BUILD_NAME', then only 'gtsam/${GTSAM_BUILD_NAME}' will
# be searched.
# - Use the standard CMAKE_PREFIX_PATH, or GTSAM_DIR, to find a specific gtsam
# directory.
# Get path suffixes to help look for gtsam
if(GTSAM_BUILD_NAME)
set(gtsam_build_names "${GTSAM_BUILD_NAME}/gtsam")
else()
# lowercase build type
string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_suffix)
# build suffix of this project
get_filename_component(my_build_name "${CMAKE_BINARY_DIR}" NAME)
set(gtsam_build_names "${my_build_name}/gtsam" "build-${build_type_suffix}/gtsam" "build/gtsam")
endif()
# Use GTSAM_ROOT or GTSAM_DIR equivalently
if(GTSAM_ROOT AND NOT GTSAM_DIR)
set(GTSAM_DIR "${GTSAM_ROOT}")
endif()
if(GTSAM_DIR)
# Find include dirs
find_path(GTSAM_INCLUDE_DIR gtsam/inference/FactorGraph.h
PATHS "${GTSAM_DIR}/include" "${GTSAM_DIR}" NO_DEFAULT_PATH
DOC "GTSAM include directories")
# Find libraries
find_library(GTSAM_LIBS NAMES gtsam
HINTS "${GTSAM_DIR}/lib" "${GTSAM_DIR}" NO_DEFAULT_PATH
PATH_SUFFIXES ${gtsam_build_names}
DOC "GTSAM libraries")
else()
# Find include dirs
set(extra_include_paths ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" "${PROJECT_SOURCE_DIR}/../gtsam" /usr/local/include /usr/include)
find_path(GTSAM_INCLUDE_DIR gtsam/inference/FactorGraph.h
PATHS ${extra_include_paths}
DOC "GTSAM include directories")
if(NOT GTSAM_INCLUDE_DIR)
message(STATUS "Searched for gtsam headers in default paths plus ${extra_include_paths}")
endif()
# Find libraries
find_library(GTSAM_LIBS NAMES gtsam
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" "${PROJECT_SOURCE_DIR}/../gtsam" /usr/local/lib /usr/lib
PATH_SUFFIXES ${gtsam_build_names}
DOC "GTSAM libraries")
endif()
# handle the QUIETLY and REQUIRED arguments and set GTSAM_FOUND to TRUE
# if all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GTSAM DEFAULT_MSG
GTSAM_LIBS GTSAM_INCLUDE_DIR)


@ -1,88 +0,0 @@
# This is FindGTSAM_UNSTABLE.cmake
# DEPRECATED: Use config file approach to pull in targets from gtsam
# CMake module to locate the GTSAM_UNSTABLE package
#
# The following cache variables may be set before calling this script:
#
# GTSAM_UNSTABLE_DIR (or GTSAM_UNSTABLE_ROOT): (Optional) The install prefix OR source tree of gtsam_unstable (e.g. /usr/local or src/gtsam_unstable)
# GTSAM_UNSTABLE_BUILD_NAME: (Optional) If compiling against a source tree, the name of the build directory
# within it (e.g. build-debug). Without this defined, this script tries to
# intelligently find the build directory based on the project's build directory name
# or based on the build type (Debug/Release/etc).
#
# The following variables will be defined:
#
# GTSAM_UNSTABLE_FOUND : TRUE if the package has been successfully found
# GTSAM_UNSTABLE_INCLUDE_DIR : paths to GTSAM_UNSTABLE's INCLUDE directories
# GTSAM_UNSTABLE_LIBS : paths to GTSAM_UNSTABLE's libraries
#
# NOTES on compiling against an uninstalled GTSAM_UNSTABLE build tree:
# - A GTSAM_UNSTABLE source tree will be automatically searched for in the directory
# 'gtsam_unstable' next to your project directory, after searching
# CMAKE_INSTALL_PREFIX and $HOME, but before searching /usr/local and /usr.
# - The build directory will be searched first with the same name as your
# project's build directory, e.g. if you build from 'MyProject/build-optimized',
# 'gtsam_unstable/build-optimized' will be searched first. Next, a build directory for
# your project's build type, e.g. if CMAKE_BUILD_TYPE in your project is
# 'Release', then 'gtsam_unstable/build-release' will be searched next. Finally, plain
# 'gtsam_unstable/build' will be searched.
# - You can control the gtsam build directory name directly by defining the CMake
# cache variable 'GTSAM_UNSTABLE_BUILD_NAME', then only 'gtsam_unstable/${GTSAM_UNSTABLE_BUILD_NAME}' will
# be searched.
# - Use the standard CMAKE_PREFIX_PATH, or GTSAM_UNSTABLE_DIR, to find a specific gtsam_unstable
# directory.
# Get path suffixes to help look for gtsam_unstable
if(GTSAM_UNSTABLE_BUILD_NAME)
set(gtsam_unstable_build_names "${GTSAM_UNSTABLE_BUILD_NAME}/gtsam_unstable")
else()
# lowercase build type
string(TOLOWER "${CMAKE_BUILD_TYPE}" build_type_suffix)
# build suffix of this project
get_filename_component(my_build_name "${CMAKE_BINARY_DIR}" NAME)
set(gtsam_unstable_build_names "${my_build_name}/gtsam_unstable" "build-${build_type_suffix}/gtsam_unstable" "build/gtsam_unstable")
endif()
# Use GTSAM_UNSTABLE_ROOT or GTSAM_UNSTABLE_DIR equivalently
if(GTSAM_UNSTABLE_ROOT AND NOT GTSAM_UNSTABLE_DIR)
set(GTSAM_UNSTABLE_DIR "${GTSAM_UNSTABLE_ROOT}")
endif()
if(GTSAM_UNSTABLE_DIR)
# Find include dirs
find_path(GTSAM_UNSTABLE_INCLUDE_DIR gtsam_unstable/base/DSF.h
PATHS "${GTSAM_UNSTABLE_DIR}/include" "${GTSAM_UNSTABLE_DIR}" NO_DEFAULT_PATH
DOC "GTSAM_UNSTABLE include directories")
# Find libraries
find_library(GTSAM_UNSTABLE_LIBS NAMES gtsam_unstable
HINTS "${GTSAM_UNSTABLE_DIR}/lib" "${GTSAM_UNSTABLE_DIR}" NO_DEFAULT_PATH
PATH_SUFFIXES ${gtsam_unstable_build_names}
DOC "GTSAM_UNSTABLE libraries")
else()
# Find include dirs
set(extra_include_paths ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" "${PROJECT_SOURCE_DIR}/../gtsam" /usr/local/include /usr/include)
find_path(GTSAM_UNSTABLE_INCLUDE_DIR gtsam_unstable/base/DSF.h
PATHS ${extra_include_paths}
DOC "GTSAM_UNSTABLE include directories")
if(NOT GTSAM_UNSTABLE_INCLUDE_DIR)
message(STATUS "Searched for gtsam_unstable headers in default paths plus ${extra_include_paths}")
endif()
# Find libraries
find_library(GTSAM_UNSTABLE_LIBS NAMES gtsam_unstable
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" "${PROJECT_SOURCE_DIR}/../gtsam" /usr/local/lib /usr/lib
PATH_SUFFIXES ${gtsam_unstable_build_names}
DOC "GTSAM_UNSTABLE libraries")
endif()
# handle the QUIETLY and REQUIRED arguments and set GTSAM_UNSTABLE_FOUND to TRUE
# if all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GTSAM_UNSTABLE DEFAULT_MSG
GTSAM_UNSTABLE_LIBS GTSAM_UNSTABLE_INCLUDE_DIR)


@ -0,0 +1,56 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Simple robot motion example, with a prior and one GPS measurement
Author: Mandy Xie
"""
# pylint: disable=invalid-name, E1101
from __future__ import print_function
import numpy as np
import gtsam
import matplotlib.pyplot as plt
import gtsam.utils.plot as gtsam_plot
# The ENU origin is where the plane was holding next to the runway
lat0 = 33.86998
lon0 = -84.30626
h0 = 274
# Create noise models
GPS_NOISE = gtsam.noiseModel_Isotropic.Sigma(3, 0.1)
PRIOR_NOISE = gtsam.noiseModel_Isotropic.Sigma(6, 0.25)
# Create an empty nonlinear factor graph
graph = gtsam.NonlinearFactorGraph()
# Add a prior on the first point, setting it to the origin
# A prior factor consists of a mean and a noise model (covariance matrix)
priorMean = gtsam.Pose3() # prior at origin
graph.add(gtsam.PriorFactorPose3(1, priorMean, PRIOR_NOISE))
# Add GPS factors
gps = gtsam.Point3(lat0, lon0, h0)
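# NOTE: for brevity the raw geodetic values (lat, lon, h) are used directly as
# a local position here; a real application would first convert the fix to ENU.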
graph.add(gtsam.GPSFactor(1, gps, GPS_NOISE))
print("\nFactor Graph:\n{}".format(graph))
# Create the data structure to hold the initial estimate of the solution
# For illustrative purposes, these have been deliberately set to incorrect values
initial = gtsam.Values()
initial.insert(1, gtsam.Pose3())
print("\nInitial Estimate:\n{}".format(initial))
# optimize using Levenberg-Marquardt optimization
params = gtsam.LevenbergMarquardtParams()
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()
print("\nFinal Result:\n{}".format(result))


@ -0,0 +1,91 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Unit tests for Linear Factor Graphs.
Author: Frank Dellaert & Gerry Chen
"""
# pylint: disable=invalid-name, no-name-in-module, no-member
from __future__ import print_function
import unittest
import gtsam
from gtsam.utils.test_case import GtsamTestCase
import numpy as np
def create_graph():
"""Create a basic linear factor graph for testing"""
graph = gtsam.GaussianFactorGraph()
x0 = gtsam.symbol(ord('x'), 0)
x1 = gtsam.symbol(ord('x'), 1)
x2 = gtsam.symbol(ord('x'), 2)
BETWEEN_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1))
PRIOR_NOISE = gtsam.noiseModel_Diagonal.Sigmas(np.ones(1))
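# The three factors below encode x1 - x0 = 1, x2 - x1 = 2, and a prior x0 = 0,
# so the unique solution is x0 = 0, x1 = 1, x2 = 3 (the EXPECTEDX values below).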
graph.add(x1, np.eye(1), x0, -np.eye(1), np.ones(1), BETWEEN_NOISE)
graph.add(x2, np.eye(1), x1, -np.eye(1), 2*np.ones(1), BETWEEN_NOISE)
graph.add(x0, np.eye(1), np.zeros(1), PRIOR_NOISE)
return graph, (x0, x1, x2)
class TestGaussianFactorGraph(GtsamTestCase):
"""Tests for Gaussian Factor Graphs."""
def test_fg(self):
"""Test solving a linear factor graph"""
graph, X = create_graph()
result = graph.optimize()
EXPECTEDX = [0, 1, 3]
# check solutions
self.assertAlmostEqual(EXPECTEDX[0], result.at(X[0]), delta=1e-8)
self.assertAlmostEqual(EXPECTEDX[1], result.at(X[1]), delta=1e-8)
self.assertAlmostEqual(EXPECTEDX[2], result.at(X[2]), delta=1e-8)
def test_convertNonlinear(self):
"""Test converting a linear factor graph to a nonlinear one"""
graph, X = create_graph()
EXPECTEDM = [1, 2, 3]
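# With unit sigmas the variances accumulate along the chain:
# var(x0) = 1, var(x1) = 1 + 1 = 2, var(x2) = 1 + 1 + 1 = 3.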
# create nonlinear factor graph for marginalization
nfg = gtsam.LinearContainerFactor.ConvertLinearGraph(graph)
optimizer = gtsam.LevenbergMarquardtOptimizer(nfg, gtsam.Values())
nlresult = optimizer.optimizeSafely()
# marginalize
marginals = gtsam.Marginals(nfg, nlresult)
m = [marginals.marginalCovariance(x) for x in X]
# check linear marginalizations
self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
def test_linearMarginalization(self):
"""Marginalize a linear factor graph"""
graph, X = create_graph()
result = graph.optimize()
EXPECTEDM = [1, 2, 3]
# linear factor graph marginalize
marginals = gtsam.Marginals(graph, result)
m = [marginals.marginalCovariance(x) for x in X]
# check linear marginalizations
self.assertAlmostEqual(EXPECTEDM[0], m[0], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[1], m[1], delta=1e-8)
self.assertAlmostEqual(EXPECTEDM[2], m[2], delta=1e-8)
if __name__ == '__main__':
unittest.main()

debian/control (2 lines changed)

@ -10,6 +10,6 @@ Vcs-Browser: https://github.com/borglab/gtsam
Package: libgtsam-dev
Architecture: any
Depends: ${shlibs:Depends}, ${misc:Depends}
Depends: ${shlibs:Depends}, ${misc:Depends}, libboost-serialization-dev, libboost-system-dev, libboost-filesystem-dev, libboost-thread-dev, libboost-program-options-dev, libboost-date-time-dev, libboost-timer-dev, libboost-chrono-dev, libboost-regex-dev
Description: Georgia Tech Smoothing and Mapping Library
gtsam: Georgia Tech Smoothing and Mapping library for SLAM type applications

debian/rules (2 lines changed)

@ -21,5 +21,5 @@ export DH_VERBOSE = 1
# dh_make generated override targets
# This is example for Cmake (See https://bugs.debian.org/641051 )
override_dh_auto_configure:
dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF
dh_auto_configure -- -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr -DGTSAM_BUILD_EXAMPLES_ALWAYS=OFF -DGTSAM_BUILD_TESTS=OFF -DGTSAM_BUILD_WRAP=OFF -DGTSAM_BUILD_DOCS=OFF -DGTSAM_INSTALL_CPPUNITLITE=OFF -DGTSAM_INSTALL_GEOGRAPHICLIB=OFF -DGTSAM_BUILD_TYPE_POSTFIXES=OFF -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF

gtsam.h (115 lines changed)

@ -1445,6 +1445,9 @@ virtual class Null: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Fair: gtsam::noiseModel::mEstimator::Base {
@ -1453,6 +1456,9 @@ virtual class Fair: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Huber: gtsam::noiseModel::mEstimator::Base {
@ -1461,6 +1467,20 @@ virtual class Huber: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Cauchy: gtsam::noiseModel::mEstimator::Base {
Cauchy(double k);
static gtsam::noiseModel::mEstimator::Cauchy* Create(double k);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
@ -1469,6 +1489,9 @@ virtual class Tukey: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
@ -1477,8 +1500,43 @@ virtual class Welsch: gtsam::noiseModel::mEstimator::Base {
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class GemanMcClure: gtsam::noiseModel::mEstimator::Base {
GemanMcClure(double c);
static gtsam::noiseModel::mEstimator::GemanMcClure* Create(double c);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class DCS: gtsam::noiseModel::mEstimator::Base {
DCS(double c);
static gtsam::noiseModel::mEstimator::DCS* Create(double c);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
virtual class L2WithDeadZone: gtsam::noiseModel::mEstimator::Base {
L2WithDeadZone(double k);
static gtsam::noiseModel::mEstimator::L2WithDeadZone* Create(double k);
// enabling serialization functionality
void serializable() const;
double weight(double error) const;
double residual(double error) const;
};
}///\namespace mEstimator
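The weight/residual pairs added above mirror the corresponding C++ methods on the robust m-estimators. A minimal usage sketch (the threshold 1.345 is an arbitrary example value):

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

int main() {
  using gtsam::noiseModel::mEstimator::Huber;
  auto huber = Huber::Create(1.345);
  // weight(e): re-weighting factor; 1 inside the quadratic region,
  // k/|e| beyond it for Huber
  std::cout << huber->weight(0.5) << "\n";   // 1.0
  std::cout << huber->weight(10.0) << "\n";  // 1.345/10 = 0.1345
  // residual(e): the robust loss rho(e) itself, newly exposed by this commit
  std::cout << huber->residual(10.0) << "\n";
  return 0;
}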
@ -2100,6 +2158,10 @@ class Values {
class Marginals {
Marginals(const gtsam::NonlinearFactorGraph& graph,
const gtsam::Values& solution);
Marginals(const gtsam::GaussianFactorGraph& gfgraph,
const gtsam::Values& solution);
Marginals(const gtsam::GaussianFactorGraph& gfgraph,
const gtsam::VectorValues& solutionvec);
void print(string s) const;
Matrix marginalCovariance(size_t variable) const;
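The two new constructors let Marginals work directly on a linear graph, without wrapping it in a NonlinearFactorGraph first. A minimal sketch on a hypothetical one-variable toy graph:

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/nonlinear/Marginals.h>
#include <iostream>

int main() {
  gtsam::GaussianFactorGraph gfg;
  const gtsam::Matrix I1 = gtsam::Matrix::Identity(1, 1);
  // unary factor 1 * x0 = 0 with unit noise (toy example)
  gfg.add(0, I1, (gtsam::Vector(1) << 0.0).finished(),
          gtsam::noiseModel::Unit::Create(1));
  gtsam::VectorValues solution = gfg.optimize();
  // new overload: the linear graph plus its VectorValues solution
  gtsam::Marginals marginals(gfg, solution);
  std::cout << marginals.marginalCovariance(0) << std::endl;
  return 0;
}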
@ -2432,7 +2494,7 @@ virtual class PriorFactor : gtsam::NoiseModelFactor {
#include <gtsam/slam/BetweenFactor.h>
template<T = {gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::imuBias::ConstantBias}>
template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::SO3, gtsam::SO4, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::imuBias::ConstantBias}>
virtual class BetweenFactor : gtsam::NoiseModelFactor {
BetweenFactor(size_t key1, size_t key2, const T& relativePose, const gtsam::noiseModel::Base* noiseModel);
T measured() const;
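With Vector added to the template list, plain vector-valued variables get BetweenFactor support too. A minimal sketch (keys 1 and 2 are hypothetical):

#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/linear/NoiseModel.h>

int main() {
  auto noise = gtsam::noiseModel::Isotropic::Sigma(3, 0.1);
  // relative measurement v2 - v1 = (1, 2, 3) between two Vector variables
  gtsam::Vector measured = (gtsam::Vector(3) << 1.0, 2.0, 3.0).finished();
  gtsam::BetweenFactor<gtsam::Vector> factor(1, 2, measured, noise);
  return 0;
}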
@ -2845,7 +2907,33 @@ virtual class ImuFactor: gtsam::NonlinearFactor {
};
#include <gtsam/navigation/CombinedImuFactor.h>
virtual class PreintegrationCombinedParams : gtsam::PreintegrationParams {
PreintegrationCombinedParams(Vector n_gravity);
static gtsam::PreintegrationCombinedParams* MakeSharedD(double g);
static gtsam::PreintegrationCombinedParams* MakeSharedU(double g);
static gtsam::PreintegrationCombinedParams* MakeSharedD(); // default g = 9.81
static gtsam::PreintegrationCombinedParams* MakeSharedU(); // default g = 9.81
// Testable
void print(string s) const;
bool equals(const gtsam::PreintegrationCombinedParams& expected, double tol);
void setBiasAccCovariance(Matrix cov);
void setBiasOmegaCovariance(Matrix cov);
void setBiasAccOmegaInt(Matrix cov);
Matrix getBiasAccCovariance() const ;
Matrix getBiasOmegaCovariance() const ;
Matrix getBiasAccOmegaInt() const;
};
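A minimal sketch of configuring the newly wrapped combined-IMU parameters; all covariance magnitudes below are made-up example values:

#include <gtsam/navigation/CombinedImuFactor.h>

int main() {
  // Z-up navigation frame, standard gravity
  auto params = gtsam::PreintegrationCombinedParams::MakeSharedU(9.81);
  params->setAccelerometerCovariance(gtsam::I_3x3 * 1e-3);
  params->setGyroscopeCovariance(gtsam::I_3x3 * 1e-4);
  params->setIntegrationCovariance(gtsam::I_3x3 * 1e-8);
  // bias random-walk terms, specific to the combined factor
  params->setBiasAccCovariance(gtsam::I_3x3 * 1e-5);
  params->setBiasOmegaCovariance(gtsam::I_3x3 * 1e-6);
  gtsam::PreintegratedCombinedMeasurements pim(params);
  pim.integrateMeasurement(gtsam::Vector3(0.0, 0.0, 9.81),
                           gtsam::Vector3::Zero(), 0.01);
  return 0;
}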
class PreintegratedCombinedMeasurements {
// Constructors
PreintegratedCombinedMeasurements(const gtsam::PreintegrationCombinedParams* params);
PreintegratedCombinedMeasurements(const gtsam::PreintegrationCombinedParams* params,
const gtsam::imuBias::ConstantBias& bias);
// Testable
void print(string s) const;
bool equals(const gtsam::PreintegratedCombinedMeasurements& expected,
@ -2943,6 +3031,31 @@ virtual class Pose3AttitudeFactor : gtsam::NonlinearFactor {
gtsam::Unit3 bRef() const;
};
#include <gtsam/navigation/GPSFactor.h>
virtual class GPSFactor : gtsam::NonlinearFactor{
GPSFactor(size_t key, const gtsam::Point3& gpsIn,
const gtsam::noiseModel::Base* model);
// Testable
void print(string s) const;
bool equals(const gtsam::GPSFactor& expected, double tol);
// Standard Interface
gtsam::Point3 measurementIn() const;
};
virtual class GPSFactor2 : gtsam::NonlinearFactor {
GPSFactor2(size_t key, const gtsam::Point3& gpsIn,
const gtsam::noiseModel::Base* model);
// Testable
void print(string s) const;
bool equals(const gtsam::GPSFactor2& expected, double tol);
// Standard Interface
gtsam::Point3 measurementIn() const;
};
#include <gtsam/navigation/Scenario.h>
virtual class Scenario {
gtsam::Pose3 pose(double t) const;
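The GPSFactor/GPSFactor2 wrappers above make position fixes usable from the wrapped interface; in C++ the same factor is added like this (key and measurement are hypothetical):

#include <gtsam/navigation/GPSFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

int main() {
  gtsam::NonlinearFactorGraph graph;
  auto gpsNoise = gtsam::noiseModel::Isotropic::Sigma(3, 0.1);
  // local-frame position fix (e.g. ENU meters) constraining the Pose3 at key 1
  graph.emplace_shared<gtsam::GPSFactor>(1, gtsam::Point3(10.0, 5.0, 1.0), gpsNoise);
  return 0;
}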


@ -1560,9 +1560,8 @@ PUBLIC Int CCOLAMD_2 /* returns TRUE if successful, FALSE otherwise */
Int *dead_cols ;
Int set1 ;
Int set2 ;
#ifndef NDEBUG
Int cs ;
#endif
int ok ;
#ifndef NDEBUG
@ -1910,10 +1909,8 @@ PUBLIC Int CCOLAMD_2 /* returns TRUE if successful, FALSE otherwise */
p [k] = col ;
ASSERT (A [col] == EMPTY) ;
#ifndef NDEBUG
cs = CMEMBER (col) ;
cs = CMEMBER (col) ;
ASSERT (k >= cset_start [cs] && k < cset_start [cs+1]) ;
#endif
A [col] = k ;
k++ ;
@ -1929,11 +1926,11 @@ PUBLIC Int CCOLAMD_2 /* returns TRUE if successful, FALSE otherwise */
if (A [col] == EMPTY)
{
k = Col [col].shared2.order ;
cs = CMEMBER (col) ;
#ifndef NDEBUG
cs = CMEMBER (col) ;
dead_cols [cs]-- ;
ASSERT (k >= cset_start [cs] && k < cset_start [cs+1]) ;
#endif
ASSERT (k >= cset_start [cs] && k < cset_start [cs+1]) ;
DEBUG1 (("ccolamd output ordering: k "ID" col "ID
" (dense or null col)\n", k, col)) ;
p [k] = col ;


@ -7,8 +7,8 @@ export SUITESPARSE
# version of SuiteSparse_config is also version of SuiteSparse meta-package
LIBRARY = libsuitesparseconfig
VERSION = 4.5.6
SO_VERSION = 4
VERSION = 5.4.0
SO_VERSION = 5
default: library


@ -1,4 +1,4 @@
SuiteSparse_config, 2017, Timothy A. Davis, http://www.suitesparse.com
SuiteSparse_config, 2018, Timothy A. Davis, http://www.suitesparse.com
(formerly the UFconfig package)
This directory contains a default SuiteSparse_config.mk file. It tries to
@ -37,6 +37,7 @@ SuiteSparse_config is not required by these packages:
CSparse a Concise Sparse matrix package
MATLAB_Tools toolboxes for use in MATLAB
GraphBLAS graph algorithms in the language of linear algebra
In addition, the xerbla/ directory contains Fortran and C versions of the
BLAS/LAPACK xerbla routine, which is called when an invalid input is passed to


@ -4,7 +4,7 @@
/* SuiteSparse configuration : memory manager and printf functions. */
/* Copyright (c) 2013, Timothy A. Davis. No licensing restrictions
/* Copyright (c) 2013-2018, Timothy A. Davis. No licensing restrictions
* apply to this file or to the SuiteSparse_config directory.
* Author: Timothy A. Davis.
*/


@ -177,38 +177,7 @@ int SuiteSparse_divcomplex
/* SuiteSparse is not a package itself, but a collection of packages, some of
* which must be used together (UMFPACK requires AMD, CHOLMOD requires AMD,
* COLAMD, CAMD, and CCOLAMD, etc). A version number is provided here for the
* collection itself. The versions of packages within each version of
* SuiteSparse are meant to work together. Combining one package from one
* version of SuiteSparse, with another package from another version of
* SuiteSparse, may or may not work.
*
* SuiteSparse contains the following packages:
*
* SuiteSparse_config version 4.5.6 (version always the same as SuiteSparse)
* AMD version 2.4.6
* BTF version 1.2.6
* CAMD version 2.4.6
* CCOLAMD version 2.9.6
* CHOLMOD version 3.0.11
* COLAMD version 2.9.6
* CSparse version 3.1.9
* CXSparse version 3.1.9
* GPUQREngine version 1.0.5
* KLU version 1.3.8
* LDL version 2.2.6
* RBio version 2.2.6
* SPQR version 2.0.8
* SuiteSparse_GPURuntime version 1.0.5
* UMFPACK version 5.7.6
* MATLAB_Tools various packages & M-files
* xerbla version 1.0.3
*
* Other package dependencies:
* BLAS required by CHOLMOD and UMFPACK
* LAPACK required by CHOLMOD
* METIS 5.1.0 required by CHOLMOD (optional) and KLU (optional)
* CUBLAS, CUDART NVIDIA libraries required by CHOLMOD and SPQR when
* they are compiled with GPU acceleration.
* collection itself, which is also the version number of SuiteSparse_config.
*/
int SuiteSparse_version /* returns SUITESPARSE_VERSION */
@ -233,11 +202,11 @@ int SuiteSparse_version /* returns SUITESPARSE_VERSION */
*/
#define SUITESPARSE_HAS_VERSION_FUNCTION
#define SUITESPARSE_DATE "Oct 3, 2017"
#define SUITESPARSE_DATE "Dec 28, 2018"
#define SUITESPARSE_VER_CODE(main,sub) ((main) * 1000 + (sub))
#define SUITESPARSE_MAIN_VERSION 4
#define SUITESPARSE_SUB_VERSION 5
#define SUITESPARSE_SUBSUB_VERSION 6
#define SUITESPARSE_MAIN_VERSION 5
#define SUITESPARSE_SUB_VERSION 4
#define SUITESPARSE_SUBSUB_VERSION 0
#define SUITESPARSE_VERSION \
SUITESPARSE_VER_CODE(SUITESPARSE_MAIN_VERSION,SUITESPARSE_SUB_VERSION)


@ -3,9 +3,11 @@
#===============================================================================
# This file contains all configuration settings for all packages in SuiteSparse,
# except for CSparse (which is stand-alone) and the packages in MATLAB_Tools.
# except for CSparse (which is stand-alone), the packages in MATLAB_Tools,
# and GraphBLAS. The configuration settings for GraphBLAS are determined by
# GraphBLAS/CMakeLists.txt
SUITESPARSE_VERSION = 4.5.6
SUITESPARSE_VERSION = 5.4.0
#===============================================================================
# Options you can change without editing this file:
@ -57,6 +59,15 @@ SUITESPARSE_VERSION = 4.5.6
INSTALL_INCLUDE ?= $(INSTALL)/include
INSTALL_DOC ?= $(INSTALL)/share/doc/suitesparse-$(SUITESPARSE_VERSION)
CMAKE_OPTIONS ?= -DCMAKE_INSTALL_PREFIX=$(INSTALL)
#---------------------------------------------------------------------------
# parallel make
#---------------------------------------------------------------------------
# sequential make by default
JOBS ?= 1
#---------------------------------------------------------------------------
# optimization level
#---------------------------------------------------------------------------
@ -78,19 +89,11 @@ SUITESPARSE_VERSION = 4.5.6
CXX = g++
BLAS = -lrefblas -lgfortran -lstdc++
LAPACK = -llapack
CFLAGS += --coverage
OPTIMIZATION = -g
LDFLAGS += --coverage
CFLAGS += --coverage
OPTIMIZATION = -g
LDFLAGS += --coverage
endif
#---------------------------------------------------------------------------
# CFLAGS for the C/C++ compiler
#---------------------------------------------------------------------------
# The CF macro is used by SuiteSparse Makefiles as a combination of
# CFLAGS, CPPFLAGS, TARGET_ARCH, and system-dependent settings.
CF ?= $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) $(OPTIMIZATION) -fexceptions -fPIC
#---------------------------------------------------------------------------
# OpenMP is used in CHOLMOD
#---------------------------------------------------------------------------
@ -112,10 +115,12 @@ SUITESPARSE_VERSION = 4.5.6
ifneq ($(AUTOCC),no)
ifneq ($(shell which icc 2>/dev/null),)
# use the Intel icc compiler for C codes, and -qopenmp for OpenMP
CC = icc -D_GNU_SOURCE
CXX = $(CC)
CC = icc
CFLAGS += -D_GNU_SOURCE
CXX = icpc
CFOPENMP = -qopenmp -I$(MKLROOT)/include
LDFLAGS += -openmp
LDFLAGS += -qopenmp
LDLIBS += -lm -lirc
endif
ifneq ($(shell which ifort 2>/dev/null),)
# use the Intel ifort compiler for Fortran codes
@ -123,6 +128,16 @@ SUITESPARSE_VERSION = 4.5.6
endif
endif
CMAKE_OPTIONS += -DCMAKE_CXX_COMPILER=$(CXX) -DCMAKE_C_COMPILER=$(CC)
#---------------------------------------------------------------------------
# CFLAGS for the C/C++ compiler
#---------------------------------------------------------------------------
# The CF macro is used by SuiteSparse Makefiles as a combination of
# CFLAGS, CPPFLAGS, TARGET_ARCH, and system-dependent settings.
CF ?= $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) $(OPTIMIZATION) -fexceptions -fPIC
#---------------------------------------------------------------------------
# code formatting (for Tcov on Linux only)
#---------------------------------------------------------------------------
@ -157,7 +172,7 @@ SUITESPARSE_VERSION = 4.5.6
# $(MKLROOT)/lib/intel64/libmkl_intel_thread.a \
# -Wl,--end-group -lpthread -lm
# using dynamic linking:
BLAS = -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm
BLAS = -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -liomp5 -lpthread -lm
LAPACK =
else
# use the OpenBLAS at http://www.openblas.net
@ -223,12 +238,16 @@ SUITESPARSE_VERSION = 4.5.6
CUBLAS_LIB = $(CUDA_PATH)/lib64/libcublas.so
CUDA_INC_PATH = $(CUDA_PATH)/include/
CUDA_INC = -I$(CUDA_INC_PATH)
MAGMA_INC = -I/opt/magma-2.4.0/include/
MAGMA_LIB = -L/opt/magma-2.4.0/lib/ -lmagma
NVCC = $(CUDA_PATH)/bin/nvcc
NVCCFLAGS = -Xcompiler -fPIC -O3 \
-gencode=arch=compute_30,code=sm_30 \
-gencode=arch=compute_35,code=sm_35 \
-gencode=arch=compute_50,code=sm_50 \
-gencode=arch=compute_50,code=compute_50
-gencode=arch=compute_53,code=sm_53 \
-gencode=arch=compute_53,code=sm_53 \
-gencode=arch=compute_60,code=compute_60
endif
#---------------------------------------------------------------------------
@ -555,6 +574,7 @@ config:
@echo 'Install include files in: INSTALL_INCLUDE=' '$(INSTALL_INCLUDE)'
@echo 'Install documentation in: INSTALL_DOC= ' '$(INSTALL_DOC)'
@echo 'Optimization level: OPTIMIZATION= ' '$(OPTIMIZATION)'
@echo 'parallel make jobs: JOBS= ' '$(JOBS)'
@echo 'BLAS library: BLAS= ' '$(BLAS)'
@echo 'LAPACK library: LAPACK= ' '$(LAPACK)'
@echo 'Intel TBB library: TBB= ' '$(TBB)'


@ -103,11 +103,7 @@ message(STATUS "Building GTSAM - shared: ${BUILD_SHARED_LIBS}")
# BUILD_SHARED_LIBS automatically defines static/shared libs:
add_library(gtsam ${gtsam_srcs})
# Boost:
target_link_libraries(gtsam PUBLIC ${GTSAM_BOOST_LIBRARIES})
target_include_directories(gtsam PUBLIC ${Boost_INCLUDE_DIR})
# Other deps:
target_link_libraries(gtsam PUBLIC ${GTSAM_ADDITIONAL_LIBRARIES})
# Apply build flags:


@ -453,7 +453,7 @@ typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative52(
}
template<class Y, class X1, class X2, class X3, class X4, class X5>
inline typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative51(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
inline typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative52(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) {
return numericalDerivative52<Y, X1, X2, X3, X4, X5>(boost::bind(h, _1, _2, _3, _4, _5), x1, x2, x3, x4, x5);
}
@ -509,7 +509,7 @@ typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative54(
}
template<class Y, class X1, class X2, class X3, class X4, class X5>
inline typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative53(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
inline typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative54(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) {
return numericalDerivative54<Y, X1, X2, X3, X4, X5>(boost::bind(h, _1, _2, _3, _4, _5), x1, x2, x3, x4, x5);
}
@ -537,11 +537,185 @@ typename internal::FixedSizeMatrix<Y,X5>::type numericalDerivative55(
}
template<class Y, class X1, class X2, class X3, class X4, class X5>
inline typename internal::FixedSizeMatrix<Y,X5>::type numericalDerivative53(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
inline typename internal::FixedSizeMatrix<Y,X5>::type numericalDerivative55(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, double delta = 1e-5) {
return numericalDerivative55<Y, X1, X2, X3, X4, X5>(boost::bind(h, _1, _2, _3, _4, _5), x1, x2, x3, x4, x5);
}
/**
* Compute numerical derivative in argument 1 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X1>::type numericalDerivative61(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X1>::structure_category>::value),
"Template argument X1 must be a manifold type.");
return numericalDerivative11<Y, X1>(boost::bind(h, _1, boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x1, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X1>::type numericalDerivative61(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative61<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical derivative in argument 2 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative62(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X2>::structure_category>::value),
"Template argument X2 must be a manifold type.");
return numericalDerivative11<Y, X2>(boost::bind(h, boost::cref(x1), _1, boost::cref(x3), boost::cref(x4), boost::cref(x5), boost::cref(x6)), x2, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X2>::type numericalDerivative62(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative62<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical derivative in argument 3 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X3>::type numericalDerivative63(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X3>::structure_category>::value),
"Template argument X3 must be a manifold type.");
return numericalDerivative11<Y, X3>(boost::bind(h, boost::cref(x1), boost::cref(x2), _1, boost::cref(x4), boost::cref(x5), boost::cref(x6)), x3, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X3>::type numericalDerivative63(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative63<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical derivative in argument 4 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative64(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X4>::structure_category>::value),
"Template argument X4 must be a manifold type.");
return numericalDerivative11<Y, X4>(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), _1, boost::cref(x5), boost::cref(x6)), x4, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X4>::type numericalDerivative64(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative64<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical derivative in argument 5 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X5>::type numericalDerivative65(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X5>::structure_category>::value),
"Template argument X5 must be a manifold type.");
return numericalDerivative11<Y, X5>(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), _1, boost::cref(x6)), x5, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X5>::type numericalDerivative65(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative65<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical derivative in argument 6 of 6-argument function
* @param h six-argument function yielding m-vector
* @param x1 n-dimensional first argument value
* @param x2 second argument value
* @param x3 third argument value
* @param x4 fourth argument value
* @param x5 fifth argument value
* @param x6 sixth argument value
* @param delta increment for numerical derivative
* @return m*n Jacobian computed via central differencing
*/
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
typename internal::FixedSizeMatrix<Y,X6>::type numericalDerivative66(
boost::function<Y(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&)> h, const X1& x1,
const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
"Template argument Y must be a manifold type.");
BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X6>::structure_category>::value),
"Template argument X6 must be a manifold type.");
return numericalDerivative11<Y, X6>(boost::bind(h, boost::cref(x1), boost::cref(x2), boost::cref(x3), boost::cref(x4), boost::cref(x5), _1), x6, delta);
}
template<class Y, class X1, class X2, class X3, class X4, class X5, class X6>
inline typename internal::FixedSizeMatrix<Y,X6>::type numericalDerivative66(Y (*h)(const X1&, const X2&, const X3&, const X4&, const X5&, const X6&),
const X1& x1, const X2& x2, const X3& x3, const X4& x4, const X5& x5, const X6& x6, double delta = 1e-5) {
return numericalDerivative66<Y, X1, X2, X3, X4, X5, X6>(boost::bind(h, _1, _2, _3, _4, _5, _6), x1, x2, x3, x4, x5, x6);
}
/**
* Compute numerical Hessian matrix. Requires a single-argument Lie->scalar
* function. This is implemented simply as the derivative of the gradient.


@ -135,6 +135,86 @@ TEST(testNumericalDerivative, numericalHessian311) {
EXPECT(assert_equal(expected33, actual33, 1e-5));
}
/* ************************************************************************* */
Vector6 f6(const double x1, const double x2, const double x3, const double x4,
const double x5, const double x6) {
Vector6 result;
result << sin(x1), cos(x2), x3 * x3, x4 * x4 * x4, sqrt(x5), sin(x6) - cos(x6);
return result;
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative61) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected61 = (Matrix(6, 1) << cos(x1), 0, 0, 0, 0, 0).finished();
Matrix61 actual61 = numericalDerivative61<Vector6, double, double,
double, double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected61, actual61, 1e-5));
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative62) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected62 = (Matrix(6, 1) << 0, -sin(x2), 0, 0, 0, 0).finished();
Matrix61 actual62 = numericalDerivative62<Vector6, double, double, double,
double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected62, actual62, 1e-5));
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative63) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected63 = (Matrix(6, 1) << 0, 0, 2 * x3, 0, 0, 0).finished();
Matrix61 actual63 = numericalDerivative63<Vector6, double, double, double,
double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected63, actual63, 1e-5));
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative64) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected64 = (Matrix(6, 1) << 0, 0, 0, 3 * x4 * x4, 0, 0).finished();
Matrix61 actual64 = numericalDerivative64<Vector6, double, double, double,
double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected64, actual64, 1e-5));
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative65) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected65 = (Matrix(6, 1) << 0, 0, 0, 0, 0.5 / sqrt(x5), 0).finished();
Matrix61 actual65 = numericalDerivative65<Vector6, double, double, double,
double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected65, actual65, 1e-5));
}
/* ************************************************************************* */
//
TEST(testNumericalDerivative, numericalDerivative66) {
double x1 = 1, x2 = 2, x3 = 3, x4 = 4, x5 = 5, x6 = 6;
Matrix expected66 = (Matrix(6, 1) << 0, 0, 0, 0, 0, cos(x6) + sin(x6)).finished();
Matrix61 actual66 = numericalDerivative66<Vector6, double, double, double,
double, double, double>(f6, x1, x2, x3, x4, x5, x6);
EXPECT(assert_equal(expected66, actual66, 1e-5));
}
/* ************************************************************************* */
int main() {
TestResult tr;


@ -46,16 +46,20 @@ DiscreteConditional::DiscreteConditional(const size_t nrFrontals,
/* ******************************************************************************** */
DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint,
const DecisionTreeFactor& marginal, const boost::optional<Ordering>& orderedKeys) :
const DecisionTreeFactor& marginal) :
BaseFactor(
ISDEBUG("DiscreteConditional::COUNT") ? joint : joint / marginal), BaseConditional(
joint.size()-marginal.size()) {
if (ISDEBUG("DiscreteConditional::DiscreteConditional"))
cout << (firstFrontalKey()) << endl; //TODO Print all keys
if (orderedKeys) {
keys_.clear();
keys_.insert(keys_.end(), orderedKeys->begin(), orderedKeys->end());
}
}
/* ******************************************************************************** */
DiscreteConditional::DiscreteConditional(const DecisionTreeFactor& joint,
const DecisionTreeFactor& marginal, const Ordering& orderedKeys) :
DiscreteConditional(joint, marginal) {
keys_.clear();
keys_.insert(keys_.end(), orderedKeys.begin(), orderedKeys.end());
}
/* ******************************************************************************** */


@ -60,7 +60,11 @@ public:
/** construct P(X|Y)=P(X,Y)/P(Y) from P(X,Y) and P(Y) */
DiscreteConditional(const DecisionTreeFactor& joint,
const DecisionTreeFactor& marginal, const boost::optional<Ordering>& orderedKeys = boost::none);
const DecisionTreeFactor& marginal);
/** construct P(X|Y)=P(X,Y)/P(Y) from P(X,Y) and P(Y) */
DiscreteConditional(const DecisionTreeFactor& joint,
const DecisionTreeFactor& marginal, const Ordering& orderedKeys);
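The boost::optional<Ordering> parameter is split into two explicit overloads; a minimal sketch of both call forms (the toy probability tables are made up):

#include <gtsam/discrete/DiscreteConditional.h>

int main() {
  gtsam::DiscreteKey X(0, 2), Y(1, 2);
  gtsam::DecisionTreeFactor joint(X & Y, "0.2 0.2 0.3 0.3");
  gtsam::DecisionTreeFactor marginal(Y, "0.5 0.5");
  // overload 1: keep the keys as they come out of the division P(X,Y)/P(Y)
  gtsam::DiscreteConditional pXgivenY(joint, marginal);
  // overload 2: impose an explicit key order (no boost::optional involved)
  gtsam::Ordering ordered(gtsam::KeyVector{0, 1});
  gtsam::DiscreteConditional pXgivenY2(joint, marginal, ordered);
  return 0;
}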
/**
* Combine several conditionals into a single one.


@ -262,7 +262,7 @@ namespace gtsam {
// Now, marginalize out everything that is not variable j
BayesNetType marginalBN = *cliqueMarginal.marginalMultifrontalBayesNet(
Ordering(cref_list_of<1,Key>(j)), boost::none, function);
Ordering(cref_list_of<1,Key>(j)), function);
// The Bayes net should contain only one conditional for variable j, so return it
return marginalBN.front();
@ -383,7 +383,7 @@ namespace gtsam {
}
// now, marginalize out everything that is not variable j1 or j2
return p_BC1C2.marginalMultifrontalBayesNet(Ordering(cref_list_of<2,Key>(j1)(j2)), boost::none, function);
return p_BC1C2.marginalMultifrontalBayesNet(Ordering(cref_list_of<2,Key>(j1)(j2)), function);
}
/* ************************************************************************* */
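Call sites like the two above simply drop the boost::none placeholder. From user code, the same marginalization now reads (toy graph, keys hypothetical):

#include <gtsam/linear/GaussianFactorGraph.h>

int main() {
  gtsam::GaussianFactorGraph gfg;
  const gtsam::Matrix I1 = gtsam::Matrix::Identity(1, 1);
  auto unit = gtsam::noiseModel::Unit::Create(1);
  gfg.add(0, I1, (gtsam::Vector(1) << 0.0).finished(), unit);          // prior x0 = 0
  gfg.add(0, -I1, 1, I1, (gtsam::Vector(1) << 1.0).finished(), unit);  // x1 - x0 = 1
  // keep only variable 1; the rest is marginalized out with the default
  // elimination function, and no boost::none placeholder is needed anymore
  const gtsam::KeyVector wanted{1};
  auto marginalBN = gfg.marginalMultifrontalBayesNet(wanted);
  return 0;
}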


@ -171,7 +171,7 @@ namespace gtsam {
// The variables we want to keep are exactly the ones in S
KeyVector indicesS(this->conditional()->beginParents(), this->conditional()->endParents());
cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), boost::none, function);
cachedSeparatorMarginal_ = *p_Cp.marginalMultifrontalBayesNet(Ordering(indicesS), function);
}
}


@ -28,33 +28,19 @@ namespace gtsam {
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesNetType>
EliminateableFactorGraph<FACTORGRAPH>::eliminateSequential(
OptionalOrdering ordering, const Eliminate& function,
OptionalVariableIndex variableIndex, OptionalOrderingType orderingType) const
{
if(ordering && variableIndex) {
gttic(eliminateSequential);
// Do elimination
EliminationTreeType etree(asDerived(), *variableIndex, *ordering);
boost::shared_ptr<BayesNetType> bayesNet;
boost::shared_ptr<FactorGraphType> factorGraph;
boost::tie(bayesNet,factorGraph) = etree.eliminate(function);
// If any factors are remaining, the ordering was incomplete
if(!factorGraph->empty())
throw InconsistentEliminationRequested();
// Return the Bayes net
return bayesNet;
}
else if(!variableIndex) {
OptionalOrderingType orderingType, const Eliminate& function,
OptionalVariableIndex variableIndex) const {
if(!variableIndex) {
// If no VariableIndex provided, compute one and call this function again IMPORTANT: we check
// for no variable index first so that it's always computed if we need to call COLAMD because
// no Ordering is provided.
// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
// before creating ordering.
VariableIndex computedVariableIndex(asDerived());
return eliminateSequential(ordering, function, computedVariableIndex, orderingType);
return eliminateSequential(function, computedVariableIndex, orderingType);
}
else /*if(!ordering)*/ {
// If no Ordering provided, compute one and call this function again. We are guaranteed to
// have a VariableIndex already here because we computed one if needed in the previous 'else'
// block.
else {
// Compute an ordering and call this function again. We are guaranteed to have a
// VariableIndex already here because we computed one if needed in the previous 'if' block.
if (orderingType == Ordering::METIS) {
Ordering computedOrdering = Ordering::Metis(asDerived());
return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
@ -65,17 +51,75 @@ namespace gtsam {
}
}
/* ************************************************************************* */
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesNetType>
EliminateableFactorGraph<FACTORGRAPH>::eliminateSequential(
const Ordering& ordering, const Eliminate& function,
OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no VariableIndex provided, compute one and call this function again
VariableIndex computedVariableIndex(asDerived());
return eliminateSequential(ordering, function, computedVariableIndex);
} else {
gttic(eliminateSequential);
// Do elimination
EliminationTreeType etree(asDerived(), *variableIndex, ordering);
boost::shared_ptr<BayesNetType> bayesNet;
boost::shared_ptr<FactorGraphType> factorGraph;
boost::tie(bayesNet,factorGraph) = etree.eliminate(function);
// If any factors are remaining, the ordering was incomplete
if(!factorGraph->empty())
throw InconsistentEliminationRequested();
// Return the Bayes net
return bayesNet;
}
}
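After this refactor the optional Ordering argument is gone: one overload takes an optional ordering type and computes the ordering itself, the other takes a concrete Ordering. A usage sketch on a toy linear graph:

#include <gtsam/linear/GaussianFactorGraph.h>

int main() {
  gtsam::GaussianFactorGraph gfg;
  const gtsam::Matrix I1 = gtsam::Matrix::Identity(1, 1);
  auto unit = gtsam::noiseModel::Unit::Create(1);
  gfg.add(0, I1, (gtsam::Vector(1) << 0.0).finished(), unit);          // prior x0 = 0
  gfg.add(0, -I1, 1, I1, (gtsam::Vector(1) << 1.0).finished(), unit);  // x1 - x0 = 1
  // overload 1: GTSAM computes the ordering (COLAMD by default, METIS on request)
  auto bn1 = gfg.eliminateSequential();
  auto bn2 = gfg.eliminateSequential(gtsam::Ordering::METIS);
  // overload 2: the caller supplies the ordering explicitly
  gtsam::Ordering ordering = gtsam::Ordering::Colamd(gfg);
  auto bn3 = gfg.eliminateSequential(ordering);
  return 0;
}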
/* ************************************************************************* */
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
OptionalOrdering ordering, const Eliminate& function,
OptionalVariableIndex variableIndex, OptionalOrderingType orderingType) const
OptionalOrderingType orderingType, const Eliminate& function,
OptionalVariableIndex variableIndex) const
{
if(ordering && variableIndex) {
if(!variableIndex) {
// If no VariableIndex provided, compute one and call this function again IMPORTANT: we check
// for no variable index first so that it's always computed if we need to call COLAMD because
// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
// before creating ordering.
VariableIndex computedVariableIndex(asDerived());
return eliminateMultifrontal(function, computedVariableIndex, orderingType);
}
else {
// Compute an ordering and call this function again. We are guaranteed to have a
// VariableIndex already here because we computed one if needed in the previous 'if' block.
if (orderingType == Ordering::METIS) {
Ordering computedOrdering = Ordering::Metis(asDerived());
return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
} else {
Ordering computedOrdering = Ordering::Colamd(*variableIndex);
return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
}
}
}
/* ************************************************************************* */
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
const Ordering& ordering, const Eliminate& function,
OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no VariableIndex provided, compute one and call this function again
VariableIndex computedVariableIndex(asDerived());
return eliminateMultifrontal(ordering, function, computedVariableIndex);
} else {
gttic(eliminateMultifrontal);
// Do elimination with given ordering
EliminationTreeType etree(asDerived(), *variableIndex, ordering);
JunctionTreeType junctionTree(etree);
boost::shared_ptr<BayesTreeType> bayesTree;
boost::shared_ptr<FactorGraphType> factorGraph;
@ -86,25 +130,6 @@ namespace gtsam {
// Return the Bayes tree
return bayesTree;
}
}
/* ************************************************************************* */
@ -191,57 +216,65 @@ namespace gtsam {
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesNetType>
EliminateableFactorGraph<FACTORGRAPH>::marginalMultifrontalBayesNet(
boost::variant<const Ordering&, const KeyVector&> variables,
const Eliminate& function, OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no variable index is provided, compute one and call this function again
VariableIndex index(asDerived());
return marginalMultifrontalBayesNet(variables, function, index);
} else {
// No ordering was provided for the marginalized variables, so order them using constrained
// COLAMD.
bool unmarginalizedAreOrdered = (boost::get<const Ordering&>(&variables) != 0);
const KeyVector* variablesOrOrdering =
unmarginalizedAreOrdered ?
boost::get<const Ordering&>(&variables) : boost::get<const KeyVector&>(&variables);
Ordering totalOrdering =
Ordering::ColamdConstrainedLast(*variableIndex, *variablesOrOrdering, unmarginalizedAreOrdered);
// Split up ordering
const size_t nVars = variablesOrOrdering->size();
Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars);
Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end());
// Call this function again with the computed orderings
return marginalMultifrontalBayesNet(marginalVarsOrdering, marginalizationOrdering, function, *variableIndex);
}
}
/* ************************************************************************* */
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesNetType>
EliminateableFactorGraph<FACTORGRAPH>::marginalMultifrontalBayesNet(
boost::variant<const Ordering&, const KeyVector&> variables,
const Ordering& marginalizedVariableOrdering,
const Eliminate& function, OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no variable index is provided, compute one and call this function again
VariableIndex index(asDerived());
return marginalMultifrontalBayesNet(variables, marginalizedVariableOrdering, function, index);
} else {
gttic(marginalMultifrontalBayesNet);
// An ordering was provided for the marginalized variables, so we can first eliminate them
// in the order requested.
boost::shared_ptr<BayesTreeType> bayesTree;
boost::shared_ptr<FactorGraphType> factorGraph;
boost::tie(bayesTree,factorGraph) =
eliminatePartialMultifrontal(marginalizedVariableOrdering, function, *variableIndex);
if(const Ordering* varsAsOrdering = boost::get<const Ordering&>(&variables))
{
// An ordering was also provided for the unmarginalized variables, so we can also
// eliminate them in the order requested.
return factorGraph->eliminateSequential(*varsAsOrdering, function);
}
else
{
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
return factorGraph->eliminateSequential(function);
}
}
}
@ -250,57 +283,65 @@ namespace gtsam {
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
EliminateableFactorGraph<FACTORGRAPH>::marginalMultifrontalBayesTree(
boost::variant<const Ordering&, const KeyVector&> variables,
const Eliminate& function, OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no variable index is provided, compute one and call this function again
VariableIndex computedVariableIndex(asDerived());
return marginalMultifrontalBayesTree(variables, function, computedVariableIndex);
} else {
// No ordering was provided for the marginalized variables, so order them using constrained
// COLAMD.
bool unmarginalizedAreOrdered = (boost::get<const Ordering&>(&variables) != 0);
const KeyVector* variablesOrOrdering =
unmarginalizedAreOrdered ?
boost::get<const Ordering&>(&variables) : boost::get<const KeyVector&>(&variables);
Ordering totalOrdering =
Ordering::ColamdConstrainedLast(*variableIndex, *variablesOrOrdering, unmarginalizedAreOrdered);
// Split up ordering
const size_t nVars = variablesOrOrdering->size();
Ordering marginalizationOrdering(totalOrdering.begin(), totalOrdering.end() - nVars);
Ordering marginalVarsOrdering(totalOrdering.end() - nVars, totalOrdering.end());
// Call this function again with the computed orderings
return marginalMultifrontalBayesTree(marginalVarsOrdering, marginalizationOrdering, function, *variableIndex);
}
}
/* ************************************************************************* */
template<class FACTORGRAPH>
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
EliminateableFactorGraph<FACTORGRAPH>::marginalMultifrontalBayesTree(
boost::variant<const Ordering&, const KeyVector&> variables,
const Ordering& marginalizedVariableOrdering,
const Eliminate& function, OptionalVariableIndex variableIndex) const
{
if(!variableIndex) {
// If no variable index is provided, compute one and call this function again
VariableIndex computedVariableIndex(asDerived());
return marginalMultifrontalBayesTree(variables, marginalizedVariableOrdering, function, computedVariableIndex);
} else {
gttic(marginalMultifrontalBayesTree);
// An ordering was provided for the marginalized variables, so we can first eliminate them
// in the order requested.
boost::shared_ptr<BayesTreeType> bayesTree;
boost::shared_ptr<FactorGraphType> factorGraph;
boost::tie(bayesTree,factorGraph) =
eliminatePartialMultifrontal(marginalizedVariableOrdering, function, *variableIndex);
if(const Ordering* varsAsOrdering = boost::get<const Ordering&>(&variables))
{
// An ordering was also provided for the unmarginalized variables, so we can also
// eliminate them in the order requested.
return factorGraph->eliminateMultifrontal(*varsAsOrdering, function);
}
else
{
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
return factorGraph->eliminateMultifrontal(function);
}
}
}
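/* ************************************************************************* */
// Editor's usage sketch (not part of the diff): the split marginal API in one
// place. One overload computes the marginalization ordering with constrained
// COLAMD; the other takes it explicitly. The graph, the keys, and an ordering
// covering exactly the marginalized-out variables are assumed to be supplied
// by the caller.
#include <gtsam/linear/GaussianFactorGraph.h>
void marginalApiSketch(const gtsam::GaussianFactorGraph& graph,
                       const gtsam::Ordering& marginalizedOrdering) {
  using namespace gtsam;
  const KeyVector keys{1, 2};
  // Constrained-COLAMD ordering for everything not in 'keys':
  auto net = graph.marginalMultifrontalBayesNet(keys);
  // Explicit ordering for the marginalized-out variables:
  auto tree = graph.marginalMultifrontalBayesTree(keys, marginalizedOrdering);
}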


@ -88,9 +88,6 @@ namespace gtsam {
/// The function type that does a single dense elimination step on a subgraph.
typedef boost::function<EliminationResult(const FactorGraphType&, const Ordering&)> Eliminate;
/// Typedef for an optional variable index as an argument to elimination functions
typedef boost::optional<const VariableIndex&> OptionalVariableIndex;
@ -108,24 +105,38 @@ namespace gtsam {
* <b> Example - METIS ordering for elimination
* \code
* boost::shared_ptr<GaussianBayesNet> result = graph.eliminateSequential(OrderingType::METIS);
* \endcode
*
* <b> Example - Reusing an existing VariableIndex to improve performance, and using COLAMD ordering: </b>
* \code
* VariableIndex varIndex(graph); // Build variable index
* Data data = otherFunctionUsingVariableIndex(graph, varIndex); // Other code that uses variable index
* boost::shared_ptr<GaussianBayesNet> result = graph.eliminateSequential(EliminateQR, varIndex, boost::none);
* \endcode
* */
boost::shared_ptr<BayesNetType> eliminateSequential(
OptionalOrderingType orderingType = boost::none,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Do sequential elimination of all variables to produce a Bayes net.
*
* <b> Example - Full QR elimination in specified order:
* \code
* boost::shared_ptr<GaussianBayesNet> result = graph.eliminateSequential(myOrdering, EliminateQR);
* \endcode
*
* <b> Example - Reusing an existing VariableIndex to improve performance: </b>
* \code
* VariableIndex varIndex(graph); // Build variable index
* Data data = otherFunctionUsingVariableIndex(graph, varIndex); // Other code that uses variable index
* boost::shared_ptr<GaussianBayesNet> result = graph.eliminateSequential(myOrdering, EliminateQR, varIndex);
* \endcode
* */
boost::shared_ptr<BayesNetType> eliminateSequential(
const Ordering& ordering,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Do multifrontal elimination of all variables to produce a Bayes tree. If an ordering is not
* provided, the ordering will be computed using either COLAMD or METIS, depending on
@ -136,11 +147,6 @@ namespace gtsam {
* boost::shared_ptr<GaussianBayesTree> result = graph.eliminateMultifrontal(EliminateCholesky);
* \endcode
*
*
* <b> Example - Reusing an existing VariableIndex to improve performance, and using COLAMD ordering: </b>
* \code
* VariableIndex varIndex(graph); // Build variable index
@ -149,10 +155,23 @@ namespace gtsam {
* \endcode
* */
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
OptionalOrderingType orderingType = boost::none,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Do multifrontal elimination of all variables, in the order specified by \c ordering,
* to produce a Bayes tree.
*
* <b> Example - Full QR elimination in specified order:
* \code
* boost::shared_ptr<GaussianBayesTree> result = graph.eliminateMultifrontal(myOrdering, EliminateQR);
* \endcode
* */
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
const Ordering& ordering,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Do sequential elimination of some variables, in \c ordering provided, to produce a Bayes net
* and a remaining factor graph. This computes the factorization \f$ p(X) = p(A|B) p(B) \f$,
@ -194,20 +213,47 @@ namespace gtsam {
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Compute the marginal of the requested variables and return the result as a Bayes net. Uses
* COLAMD marginalization ordering by default
* @param variables Determines the variables whose marginal to compute, if provided as an
* Ordering they will be ordered in the returned BayesNet as specified, and if provided
* as a KeyVector they will be ordered using constrained COLAMD.
* @param function Optional dense elimination function, if not provided the default will be
* used.
* @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not
* provided one will be computed. */
boost::shared_ptr<BayesNetType> marginalMultifrontalBayesNet(
boost::variant<const Ordering&, const KeyVector&> variables,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Compute the marginal of the requested variables and return the result as a Bayes net.
* @param variables Determines the variables whose marginal to compute, if provided as an
* Ordering they will be ordered in the returned BayesNet as specified, and if provided
* as a KeyVector they will be ordered using constrained COLAMD.
* @param marginalizedVariableOrdering Ordering for the variables being marginalized out,
* i.e. all variables not in \c variables.
* @param function Optional dense elimination function, if not provided the default will be
* used.
* @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not
* provided one will be computed. */
boost::shared_ptr<BayesNetType> marginalMultifrontalBayesNet(
boost::variant<const Ordering&, const KeyVector&> variables,
const Ordering& marginalizedVariableOrdering,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
/** Compute the marginal of the requested variables and return the result as a Bayes tree. Uses
* COLAMD marginalization order by default
* @param variables Determines the variables whose marginal to compute, if provided as an
* Ordering they will be ordered in the returned BayesNet as specified, and if provided
* as a KeyVector they will be ordered using constrained COLAMD.
* @param function Optional dense elimination function, if not provided the default will be
* used.
* @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not
* provided one will be computed. */
boost::shared_ptr<BayesTreeType> marginalMultifrontalBayesTree(
boost::variant<const Ordering&, const KeyVector&> variables,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
@ -215,16 +261,15 @@ namespace gtsam {
* @param variables Determines the variables whose marginal to compute, if provided as an
* Ordering they will be ordered in the returned BayesNet as specified, and if provided
* as a KeyVector they will be ordered using constrained COLAMD.
* @param marginalizedVariableOrdering Ordering for the variables being marginalized out,
* i.e. all variables not in \c variables.
* @param function Optional dense elimination function, if not provided the default will be
* used.
* @param variableIndex Optional pre-computed VariableIndex for the factor graph, if not
* provided one will be computed. */
boost::shared_ptr<BayesTreeType> marginalMultifrontalBayesTree(
boost::variant<const Ordering&, const KeyVector&> variables,
const Ordering& marginalizedVariableOrdering,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const;
@ -241,6 +286,59 @@ namespace gtsam {
// Access the derived factor graph class
FactorGraphType& asDerived() { return static_cast<FactorGraphType&>(*this); }
public:
/** \deprecated ordering and orderingType shouldn't both be specified */
boost::shared_ptr<BayesNetType> eliminateSequential(
const Ordering& ordering,
const Eliminate& function,
OptionalVariableIndex variableIndex,
OptionalOrderingType orderingType) const {
return eliminateSequential(ordering, function, variableIndex);
}
/** \deprecated orderingType specified first for consistency */
boost::shared_ptr<BayesNetType> eliminateSequential(
const Eliminate& function,
OptionalVariableIndex variableIndex = boost::none,
OptionalOrderingType orderingType = boost::none) const {
return eliminateSequential(orderingType, function, variableIndex);
}
/** \deprecated ordering and orderingType shouldn't both be specified */
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
const Ordering& ordering,
const Eliminate& function,
OptionalVariableIndex variableIndex,
OptionalOrderingType orderingType) const {
return eliminateMultifrontal(ordering, function, variableIndex);
}
/** \deprecated orderingType specified first for consistency */
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
const Eliminate& function,
OptionalVariableIndex variableIndex = boost::none,
OptionalOrderingType orderingType = boost::none) const {
return eliminateMultifrontal(orderingType, function, variableIndex);
}
/** \deprecated */
boost::shared_ptr<BayesNetType> marginalMultifrontalBayesNet(
boost::variant<const Ordering&, const KeyVector&> variables,
boost::none_t,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const {
return marginalMultifrontalBayesNet(variables, function, variableIndex);
}
/** \deprecated */
boost::shared_ptr<BayesTreeType> marginalMultifrontalBayesTree(
boost::variant<const Ordering&, const KeyVector&> variables,
boost::none_t,
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
OptionalVariableIndex variableIndex = boost::none) const {
return marginalMultifrontalBayesTree(variables, function, variableIndex);
}
};
}
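/* ************************************************************************* */
// Editor's usage sketch (not part of the diff), assuming a populated
// GaussianFactorGraph: the boost::optional ordering parameter is gone, so an
// ordering type and an explicit ordering now go through separate overloads,
// with the old mixed signatures kept only as the deprecated shims above.
#include <gtsam/linear/GaussianFactorGraph.h>
void eliminationApiSketch(const gtsam::GaussianFactorGraph& graph) {
  using namespace gtsam;
  auto bn1 = graph.eliminateSequential();                 // COLAMD by default
  auto bn2 = graph.eliminateSequential(Ordering::METIS);  // ordering-type overload
  const Ordering myOrdering = Ordering::Colamd(graph);
  auto bn3 = graph.eliminateSequential(myOrdering, EliminateQR);  // explicit order
  auto bt = graph.eliminateMultifrontal(myOrdering);              // Bayes tree
}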


@ -213,11 +213,21 @@ Ordering Ordering::Metis(const MetisIndex& met) {
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
gttic(Ordering_METIS);
idx_t size = met.nValues();
if (size == 0)
{
return Ordering();
}
if (size == 1)
{
return Ordering(KeyVector(1, met.intToKey(0)));
}
vector<idx_t> xadj = met.xadj();
vector<idx_t> adj = met.adj();
vector<idx_t> perm, iperm;
for (idx_t i = 0; i < size; i++) {
perm.push_back(0);
iperm.push_back(0);


@ -191,7 +191,10 @@ public:
template<class FACTOR_GRAPH>
static Ordering Metis(const FACTOR_GRAPH& graph) {
if (graph.empty())
return Ordering();
else
return Metis(MetisIndex(graph));
}
/// @}


@ -99,7 +99,9 @@ VariableSlots::VariableSlots(const FG& factorGraph)
// factor does not involve that variable.
size_t jointFactorPos = 0;
for(const typename FG::sharedFactor& factor: factorGraph) {
if (!factor) {
continue;
}
size_t factorVarSlot = 0;
for(const Key involvedVariable: *factor) {
// Set the slot in this factor for this variable. If the
@ -111,7 +113,7 @@ VariableSlots::VariableSlots(const FG& factorGraph)
iterator thisVarSlots; bool inserted;
boost::tie(thisVarSlots, inserted) = this->insert(std::make_pair(involvedVariable, FastVector<size_t>()));
if(inserted)
thisVarSlots->second.resize(factorGraph.nrFactors(), Empty);
thisVarSlots->second[jointFactorPos] = factorVarSlot;
if(debug) std::cout << " var " << involvedVariable << " rowblock " << jointFactorPos << " comes from factor's slot " << factorVarSlot << std::endl;
++ factorVarSlot;
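/* ************************************************************************* */
// Editor's usage sketch (not part of the diff): with the assert replaced by a
// skip, factor graphs holding null factor slots (e.g. after
// FactorGraph::remove) can be scanned safely, and slot vectors are sized by
// nrFactors() rather than size().
#include <gtsam/inference/VariableSlots.h>
#include <gtsam/symbolic/SymbolicFactorGraph.h>
void nullFactorSketch() {
  gtsam::SymbolicFactorGraph graph;
  graph.push_factor(0, 1);
  graph.push_factor(1, 2);
  graph.remove(0);                    // leaves a null shared_ptr in slot 0
  gtsam::VariableSlots slots(graph);  // skips the null entry instead of asserting
}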


@ -294,6 +294,28 @@ TEST(Ordering, MetisLoop) {
}
#endif
/* ************************************************************************* */
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
TEST(Ordering, MetisEmptyGraph) {
SymbolicFactorGraph symbolicGraph;
Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph);
Ordering expected;
EXPECT(assert_equal(expected, actual));
}
#endif
/* ************************************************************************* */
#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
TEST(Ordering, MetisSingleNode) {
// create graph with a single node
SymbolicFactorGraph symbolicGraph;
symbolicGraph.push_factor(7);
Ordering actual = Ordering::Create(Ordering::METIS, symbolicGraph);
Ordering expected = Ordering(list_of(7));
EXPECT(assert_equal(expected, actual));
}
#endif
/* ************************************************************************* */
TEST(Ordering, Create) {
// create chain graph


@ -156,16 +156,17 @@ namespace gtsam {
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix(const Ordering& ordering) const {
// Convert to a GaussianFactorGraph and use its machinery
GaussianFactorGraph factorGraph(*this);
return factorGraph.jacobian(ordering);
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix() const {
// recursively call with default ordering
const auto defaultOrdering = this->ordering();
return matrix(defaultOrdering);
}
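/* ************************************************************************* */
// Editor's usage sketch (not part of the diff): the optional-ordering
// parameter of matrix() is now two overloads; the no-argument form falls back
// to the Bayes net's own elimination ordering.
#include <gtsam/linear/GaussianBayesNet.h>
void matrixSketch(const gtsam::GaussianBayesNet& bn,
                  const gtsam::Ordering& ordering) {
  const auto Rd1 = bn.matrix();          // columns in bn.ordering()
  const auto Rd2 = bn.matrix(ordering);  // columns in the given ordering
  const gtsam::Matrix& R = Rd1.first;    // upper-triangular system matrix
  const gtsam::Vector& d = Rd1.second;   // right-hand side
}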
///* ************************************************************************* */
@ -187,16 +188,16 @@ namespace gtsam {
}
/* ************************************************************************* */
double GaussianBayesNet::logDeterminant() const {
double logDet = 0.0;
for (const sharedConditional& cg : *this) {
if (cg->get_model()) {
Vector diag = cg->R().diagonal();
cg->get_model()->whitenInPlace(diag);
logDet += diag.unaryExpr([](double x) { return log(x); }).sum();
} else {
logDet +=
cg->R().diagonal().unaryExpr([](double x) { return log(x); }).sum();
}
}
return logDet;


@ -92,7 +92,14 @@ namespace gtsam {
* Will return upper-triangular matrix only when using 'ordering' above.
* In case Bayes net is incomplete zero columns are added to the end.
*/
std::pair<Matrix, Vector> matrix(const Ordering& ordering) const;
/**
* Return (dense) upper-triangular matrix representation
* Will return upper-triangular matrix only when using 'ordering' above.
* In case Bayes net is incomplete zero columns are added to the end.
*/
std::pair<Matrix, Vector> matrix() const;
/**
* Optimize along the gradient direction, with a closed-form computation to perform the line


@ -40,11 +40,11 @@ namespace gtsam {
parentSum += clique->conditional()
->R()
.diagonal()
.unaryExpr([](double x) { return log(x); })
.sum();
return 0;
}
} // namespace internal
/* ************************************************************************* */
bool GaussianBayesTree::equals(const This& other, double tol) const


@ -190,33 +190,62 @@ namespace gtsam {
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedJacobian(
const Ordering& ordering) const {
// combine all factors
JacobianFactor combined(*this, ordering);
return combined.augmentedJacobian();
}
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedJacobian() const {
// combine all factors
JacobianFactor combined(*this);
return combined.augmentedJacobian();
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianFactorGraph::jacobian(
const Ordering& ordering) const {
Matrix augmented = augmentedJacobian(ordering);
return make_pair(augmented.leftCols(augmented.cols() - 1),
augmented.col(augmented.cols() - 1));
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianFactorGraph::jacobian() const {
Matrix augmented = augmentedJacobian();
return make_pair(augmented.leftCols(augmented.cols() - 1),
augmented.col(augmented.cols() - 1));
}
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedHessian(
const Ordering& ordering) const {
// combine all factors and get upper-triangular part of Hessian
Scatter scatter(*this, ordering);
HessianFactor combined(*this, scatter);
return combined.info().selfadjointView();
}
/* ************************************************************************* */
Matrix GaussianFactorGraph::augmentedHessian() const {
// combine all factors and get upper-triangular part of Hessian
Scatter scatter(*this);
HessianFactor combined(*this, scatter);
return combined.info().selfadjointView();
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianFactorGraph::hessian(
const Ordering& ordering) const {
Matrix augmented = augmentedHessian(ordering);
size_t n = augmented.rows() - 1;
return make_pair(augmented.topLeftCorner(n, n), augmented.topRightCorner(n, 1));
}
/* ************************************************************************* */
pair<Matrix, Vector> GaussianFactorGraph::hessian() const {
Matrix augmented = augmentedHessian();
size_t n = augmented.rows() - 1;
return make_pair(augmented.topLeftCorner(n, n), augmented.topRightCorner(n, 1));
}
@ -253,8 +282,13 @@ namespace gtsam {
}
/* ************************************************************************* */
VectorValues GaussianFactorGraph::optimize(const Eliminate& function) const {
gttic(GaussianFactorGraph_optimize);
return BaseEliminateable::eliminateMultifrontal(function)->optimize();
}
/* ************************************************************************* */
VectorValues GaussianFactorGraph::optimize(const Ordering& ordering, const Eliminate& function) const {
gttic(GaussianFactorGraph_optimize);
return BaseEliminateable::eliminateMultifrontal(ordering, function)->optimize();
}
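/* ************************************************************************* */
// Editor's usage sketch (not part of the diff): the dense-matrix and optimize
// entry points follow the same pattern, with the ordering either omitted or
// passed explicitly.
#include <gtsam/linear/GaussianFactorGraph.h>
void denseApiSketch(const gtsam::GaussianFactorGraph& graph,
                    const gtsam::Ordering& ordering) {
  const gtsam::Matrix Ab1 = graph.augmentedJacobian();          // default order
  const gtsam::Matrix Ab2 = graph.augmentedJacobian(ordering);  // given order
  const auto hess = graph.hessian();                            // pair {Lambda, eta}
  const gtsam::VectorValues x1 = graph.optimize();          // COLAMD multifrontal
  const gtsam::VectorValues x2 = graph.optimize(ordering);  // explicit ordering
}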
@ -460,4 +494,11 @@ namespace gtsam {
return e;
}
/* ************************************************************************* */
/** \deprecated */
VectorValues GaussianFactorGraph::optimize(boost::none_t,
const Eliminate& function) const {
return optimize(function);
}
} // namespace gtsam


@ -201,7 +201,16 @@ namespace gtsam {
* \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
* GaussianFactorGraph::jacobian and GaussianFactorGraph::sparseJacobian.
*/
Matrix augmentedJacobian(const Ordering& ordering) const;
/**
* Return a dense \f$ [ \;A\;b\; ] \in \mathbb{R}^{m \times n+1} \f$
* Jacobian matrix, augmented with b with the noise models baked
* into A and b. The negative log-likelihood is
* \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
* GaussianFactorGraph::jacobian and GaussianFactorGraph::sparseJacobian.
*/
Matrix augmentedJacobian() const;
/**
* Return the dense Jacobian \f$ A \f$ and right-hand-side \f$ b \f$,
@ -210,7 +219,16 @@ namespace gtsam {
* GaussianFactorGraph::augmentedJacobian and
* GaussianFactorGraph::sparseJacobian.
*/
std::pair<Matrix,Vector> jacobian(const Ordering& ordering) const;
/**
* Return the dense Jacobian \f$ A \f$ and right-hand-side \f$ b \f$,
* with the noise models baked into A and b. The negative log-likelihood
* is \f$ \frac{1}{2} \Vert Ax-b \Vert^2 \f$. See also
* GaussianFactorGraph::augmentedJacobian and
* GaussianFactorGraph::sparseJacobian.
*/
std::pair<Matrix,Vector> jacobian() const;
/**
* Return a dense \f$ \Lambda \in \mathbb{R}^{n+1 \times n+1} \f$ Hessian
@ -223,7 +241,20 @@ namespace gtsam {
and the negative log-likelihood is
\f$ \frac{1}{2} x^T \Lambda x + \eta^T x + c \f$.
*/
Matrix augmentedHessian(const Ordering& ordering) const;
/**
* Return a dense \f$ \Lambda \in \mathbb{R}^{n+1 \times n+1} \f$ Hessian
* matrix, augmented with the information vector \f$ \eta \f$. The
* augmented Hessian is
\f[ \left[ \begin{array}{ccc}
\Lambda & \eta \\
\eta^T & c
\end{array} \right] \f]
and the negative log-likelihood is
\f$ \frac{1}{2} x^T \Lambda x + \eta^T x + c \f$.
*/
Matrix augmentedHessian() const;
/**
* Return the dense Hessian \f$ \Lambda \f$ and information vector
@ -231,7 +262,15 @@ namespace gtsam {
* is \frac{1}{2} x^T \Lambda x + \eta^T x + c. See also
* GaussianFactorGraph::augmentedHessian.
*/
std::pair<Matrix,Vector> hessian(const Ordering& ordering) const;
/**
* Return the dense Hessian \f$ \Lambda \f$ and information vector
* \f$ \eta \f$, with the noise models baked in. The negative log-likelihood
* is \frac{1}{2} x^T \Lambda x + \eta^T x + c. See also
* GaussianFactorGraph::augmentedHessian.
*/
std::pair<Matrix,Vector> hessian() const;
/** Return only the diagonal of the Hessian A'*A, as a VectorValues */
virtual VectorValues hessianDiagonal() const;
@ -243,7 +282,14 @@ namespace gtsam {
* the dense elimination function specified in \c function (default EliminatePreferCholesky),
* followed by back-substitution in the Bayes tree resulting from elimination. Is equivalent
* to calling graph.eliminateMultifrontal()->optimize(). */
VectorValues optimize(
const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
/** Solve the factor graph by performing multifrontal variable elimination in COLAMD order using
* the dense elimination function specified in \c function (default EliminatePreferCholesky),
* followed by back-substitution in the Bayes tree resulting from elimination. Is equivalent
* to calling graph.eliminateMultifrontal()->optimize(). */
VectorValues optimize(const Ordering&,
const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
/**
@ -329,6 +375,12 @@ namespace gtsam {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
}
public:
/** \deprecated */
VectorValues optimize(boost::none_t,
const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
};
/**


@ -250,10 +250,10 @@ HessianFactor::HessianFactor(const GaussianFactor& gf) :
/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianFactorGraph& factors,
const Scatter& scatter) {
gttic(HessianFactor_MergeConstructor);
Allocate(scatter);
// Form A' * A
gttic(update);


@ -176,7 +176,11 @@ namespace gtsam {
/** Combine a set of factors into a single dense HessianFactor */
explicit HessianFactor(const GaussianFactorGraph& factors,
const Scatter& scatter);
/** Combine a set of factors into a single dense HessianFactor */
explicit HessianFactor(const GaussianFactorGraph& factors)
: HessianFactor(factors, Scatter(factors)) {}
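// Editor's note (not part of the diff): the single-argument form above is a
// delegating constructor, so, for a populated GaussianFactorGraph `factors`,
// HessianFactor(factors) behaves exactly like
// HessianFactor(factors, Scatter(factors)); callers that already hold a
// Scatter can pass it to skip the recomputation.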
/** Destructor */
virtual ~HessianFactor() {}


@ -220,60 +220,13 @@ FastVector<JacobianFactor::shared_ptr> _convertOrCastToJacobians(
}
/* ************************************************************************* */
void JacobianFactor::JacobianFactorHelper(const GaussianFactorGraph& graph,
const FastVector<VariableSlots::const_iterator>& orderedSlots) {
// Cast or convert to Jacobians
FastVector<JacobianFactor::shared_ptr> jacobians = _convertOrCastToJacobians(
graph);
// Count dimensions
FastVector<DenseIndex> varDims;
DenseIndex m, n;
@ -344,6 +297,127 @@ JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
this->setModel(anyConstrained, *sigmas);
}
/* ************************************************************************* */
// Order variable slots - we maintain the vector of ordered slots, as well as keep a list
// 'unorderedSlots' of any variables discovered that are not in the ordering. Those will then
// be added after all of the ordered variables.
FastVector<VariableSlots::const_iterator> orderedSlotsHelper(
const Ordering& ordering,
const VariableSlots& variableSlots) {
gttic(Order_slots);
FastVector<VariableSlots::const_iterator> orderedSlots;
orderedSlots.reserve(variableSlots.size());
// If an ordering is provided, arrange the slots first in that ordering
FastList<VariableSlots::const_iterator> unorderedSlots;
size_t nOrderingSlotsUsed = 0;
orderedSlots.resize(ordering.size());
FastMap<Key, size_t> inverseOrdering = ordering.invert();
for (VariableSlots::const_iterator item = variableSlots.begin();
item != variableSlots.end(); ++item) {
FastMap<Key, size_t>::const_iterator orderingPosition =
inverseOrdering.find(item->first);
if (orderingPosition == inverseOrdering.end()) {
unorderedSlots.push_back(item);
} else {
orderedSlots[orderingPosition->second] = item;
++nOrderingSlotsUsed;
}
}
if (nOrderingSlotsUsed != ordering.size())
throw std::invalid_argument(
"The ordering provided to the JacobianFactor combine constructor\n"
"contained extra variables that did not appear in the factors to combine.");
// Add the remaining slots
for(VariableSlots::const_iterator item: unorderedSlots) {
orderedSlots.push_back(item);
}
gttoc(Order_slots);
return orderedSlots;
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph) {
gttic(JacobianFactor_combine_constructor);
// Compute VariableSlots for the graph; the const reference below binds to the
// temporary, whose lifetime is extended to match the reference
const VariableSlots & variableSlots = VariableSlots(graph);
gttic(Order_slots);
// Order variable slots - we maintain the vector of ordered slots, as well as keep a list
// 'unorderedSlots' of any variables discovered that are not in the ordering. Those will then
// be added after all of the ordered variables.
FastVector<VariableSlots::const_iterator> orderedSlots;
orderedSlots.reserve(variableSlots.size());
// If no ordering is provided, arrange the slots as they were, which will be sorted
// numerically since VariableSlots uses a map sorting on Key.
for (VariableSlots::const_iterator item = variableSlots.begin();
item != variableSlots.end(); ++item)
orderedSlots.push_back(item);
gttoc(Order_slots);
JacobianFactorHelper(graph, orderedSlots);
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
const VariableSlots& p_variableSlots) {
gttic(JacobianFactor_combine_constructor);
// Binds reference, does not copy VariableSlots
const VariableSlots & variableSlots = p_variableSlots;
gttic(Order_slots);
// Order variable slots - we maintain the vector of ordered slots, as well as keep a list
// 'unorderedSlots' of any variables discovered that are not in the ordering. Those will then
// be added after all of the ordered variables.
FastVector<VariableSlots::const_iterator> orderedSlots;
orderedSlots.reserve(variableSlots.size());
// If no ordering is provided, arrange the slots as they were, which will be sorted
// numerically since VariableSlots uses a map sorting on Key.
for (VariableSlots::const_iterator item = variableSlots.begin();
item != variableSlots.end(); ++item)
orderedSlots.push_back(item);
gttoc(Order_slots);
JacobianFactorHelper(graph, orderedSlots);
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
const Ordering& ordering) {
gttic(JacobianFactor_combine_constructor);
// Compute VariableSlots for the graph; the const reference below binds to the
// temporary, whose lifetime is extended to match the reference
const VariableSlots & variableSlots = VariableSlots(graph);
// Order variable slots
FastVector<VariableSlots::const_iterator> orderedSlots =
orderedSlotsHelper(ordering, variableSlots);
JacobianFactorHelper(graph, orderedSlots);
}
/* ************************************************************************* */
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
const Ordering& ordering,
const VariableSlots& p_variableSlots) {
gttic(JacobianFactor_combine_constructor);
// Order variable slots
FastVector<VariableSlots::const_iterator> orderedSlots =
orderedSlotsHelper(ordering, p_variableSlots);
JacobianFactorHelper(graph, orderedSlots);
}
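/* ************************************************************************* */
// Editor's usage sketch (not part of the diff): the four combine constructors
// replacing the old optional parameters all funnel into JacobianFactorHelper
// with an ordered slot list.
//   JacobianFactor j1(graph);                                // natural key order
//   JacobianFactor j2(graph, VariableSlots(graph));          // precomputed slots
//   JacobianFactor j3(graph, ordering);                      // explicit ordering
//   JacobianFactor j4(graph, ordering, VariableSlots(graph));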
/* ************************************************************************* */
void JacobianFactor::print(const string& s,
const KeyFormatter& formatter) const {


@ -22,6 +22,7 @@
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/base/VerticalBlockMatrix.h>
#include <gtsam/global_includes.h>
#include <gtsam/inference/VariableSlots.h>
#include <boost/make_shared.hpp>
@ -147,14 +148,37 @@ namespace gtsam {
JacobianFactor(
const KEYS& keys, const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& sigmas = SharedDiagonal());
/**
* Build a dense joint factor from all the factors in a factor graph. */
explicit JacobianFactor(
const GaussianFactorGraph& graph);
/**
* Build a dense joint factor from all the factors in a factor graph. If a VariableSlots
* structure computed for \c graph is already available, providing it will reduce the amount of
* computation performed. */
explicit JacobianFactor(
const GaussianFactorGraph& graph,
const VariableSlots& p_variableSlots);
/**
* Build a dense joint factor from all the factors in a factor graph, ordering the
* variables as specified in \c ordering. */
explicit JacobianFactor(
const GaussianFactorGraph& graph,
const Ordering& ordering);
/**
* Build a dense joint factor from all the factors in a factor graph. If a VariableSlots
* structure computed for \c graph is already available, providing it will reduce the amount of
* computation performed. */
explicit JacobianFactor(
const GaussianFactorGraph& graph,
const Ordering& ordering,
const VariableSlots& p_variableSlots);
/** Virtual destructor */
virtual ~JacobianFactor() {}
@ -356,6 +380,14 @@ namespace gtsam {
private:
/**
* Helper function for public constructors:
* Build a dense joint factor from all the factors in a factor graph. Takes in
* ordered variable slots */
void JacobianFactorHelper(
const GaussianFactorGraph& graph,
const FastVector<VariableSlots::const_iterator>& orderedSlots);
/** Unsafe Constructor that creates an uninitialized Jacobian of right size
* @param keys in some order
* @param dimensions of the variables in same order


@ -718,15 +718,26 @@ void Null::print(const std::string &s="") const
Null::shared_ptr Null::Create()
{ return shared_ptr(new Null()); }
/* ************************************************************************* */
// Fair
/* ************************************************************************* */
Fair::Fair(double c, const ReweightScheme reweight) : Base(reweight), c_(c) {
if (c_ <= 0) {
throw runtime_error("mEstimator Fair takes only positive double in constructor.");
}
}
double Fair::weight(double error) const {
return 1.0 / (1.0 + std::abs(error) / c_);
}
double Fair::residual(double error) const {
const double absError = std::abs(error);
const double normalizedError = absError / c_;
const double c_2 = c_ * c_;
return c_2 * (normalizedError - std::log(1 + normalizedError));
}
void Fair::print(const std::string &s="") const
{ cout << s << "fair (" << c_ << ")" << endl; }
@ -750,6 +761,20 @@ Huber::Huber(double k, const ReweightScheme reweight) : Base(reweight), k_(k) {
}
}
double Huber::weight(double error) const {
const double absError = std::abs(error);
return (absError <= k_) ? (1.0) : (k_ / absError);
}
double Huber::residual(double error) const {
const double absError = std::abs(error);
if (absError <= k_) { // |x| <= k
return error*error / 2;
} else { // |x| > k
return k_ * (absError - (k_/2));
}
}
void Huber::print(const std::string &s="") const {
cout << s << "huber (" << k_ << ")" << endl;
}
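/* ************************************************************************* */
// Editor's verification sketch (not part of the diff): the new residual()
// implementations satisfy the M-estimator identity rho'(x) = weight(x) * x
// described in NoiseModel.h. A standalone numeric check for Huber, with
// hypothetical local helpers mirroring the formulas above:
#include <cassert>
#include <cmath>
static double huberWeight(double x, double k) {
  const double a = std::abs(x);
  return (a <= k) ? 1.0 : k / a;
}
static double huberResidual(double x, double k) {
  const double a = std::abs(x);
  return (a <= k) ? x * x / 2 : k * (a - k / 2);
}
int main() {
  const double k = 1.345, h = 1e-6;
  for (double x : {0.5, -0.9, 2.0, -3.5}) {
    // Central-difference derivative of the residual:
    const double dRho = (huberResidual(x + h, k) - huberResidual(x - h, k)) / (2 * h);
    assert(std::abs(dRho - huberWeight(x, k) * x) < 1e-5);  // rho'(x) == w(x)*x
  }
  return 0;
}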
@ -774,6 +799,16 @@ Cauchy::Cauchy(double k, const ReweightScheme reweight) : Base(reweight), k_(k),
}
}
double Cauchy::weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
double Cauchy::residual(double error) const {
const double xc2 = error / k_;
const double val = std::log(1 + (xc2*xc2));
return ksquared_ * val * 0.5;
}
void Cauchy::print(const std::string &s="") const {
cout << s << "cauchy (" << k_ << ")" << endl;
}
@ -791,7 +826,31 @@ Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
/* ************************************************************************* */
// Tukey
/* ************************************************************************* */
Tukey::Tukey(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {
if (c <= 0) {
throw runtime_error("mEstimator Tukey takes only positive double in constructor.");
}
}
double Tukey::weight(double error) const {
if (std::abs(error) <= c_) {
const double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
double Tukey::residual(double error) const {
double absError = std::abs(error);
if (absError <= c_) {
const double xc2 = error*error/csquared_;
const double t = (1 - xc2)*(1 - xc2)*(1 - xc2);
return csquared_ * (1 - t) / 6.0;
} else {
return csquared_ / 6.0;
}
}
void Tukey::print(const std::string &s="") const {
std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
@ -810,8 +869,19 @@ Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
/* ************************************************************************* */
// Welsch
/* ************************************************************************* */
Welsch::Welsch(double c, const ReweightScheme reweight) : Base(reweight), c_(c), csquared_(c * c) {}
double Welsch::weight(double error) const {
const double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
double Welsch::residual(double error) const {
const double xc2 = (error*error)/csquared_;
return csquared_ * 0.5 * (1 - std::exp(-xc2) );
}
void Welsch::print(const std::string &s="") const {
std::cout << s << ": Welsch (" << c_ << ")" << std::endl;
}
@ -826,24 +896,6 @@ Welsch::shared_ptr Welsch::Create(double c, const ReweightScheme reweight) {
return shared_ptr(new Welsch(c, reweight));
}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
#endif
/* ************************************************************************* */
// GemanMcClure
/* ************************************************************************* */
@ -858,6 +910,12 @@ double GemanMcClure::weight(double error) const {
return c4/(c2error*c2error);
}
double GemanMcClure::residual(double error) const {
const double c2 = c_*c_;
const double error2 = error*error;
return 0.5 * (c2 * error2) / (c2 + error2);
}
void GemanMcClure::print(const std::string &s="") const {
std::cout << s << ": Geman-McClure (" << c_ << ")" << std::endl;
}
@ -890,6 +948,16 @@ double DCS::weight(double error) const {
return 1.0;
}
double DCS::residual(double error) const {
// This is the simplified version of Eq 9 from (Agarwal13icra)
// after you simplify and cancel terms.
const double e2 = error*error;
const double e4 = e2*e2;
const double c2 = c_*c_;
return (c2*e2 + c_*e4) / ((e2 + c_)*(e2 + c_));
}
void DCS::print(const std::string &s="") const {
std::cout << s << ": DCS (" << c_ << ")" << std::endl;
}
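/* ************************************************************************* */
// Editor's verification sketch (not part of the diff): algebraically,
// (c^2 e^2 + c e^4) / (e^2 + c)^2 = c e^2 (c + e^2) / (e^2 + c)^2
//                                 = c e^2 / (e^2 + c),
// so the residual above is the cancelled form the comment refers to; a
// standalone numeric check:
#include <cassert>
#include <cmath>
int main() {
  const double c = 1.0;
  for (double e : {0.0, 0.3, 1.0, 5.0, -2.0}) {
    const double e2 = e * e, e4 = e2 * e2, c2 = c * c;
    const double asWritten = (c2 * e2 + c * e4) / ((e2 + c) * (e2 + c));
    const double simplified = c * e2 / (e2 + c);
    assert(std::abs(asWritten - simplified) < 1e-12);
  }
  return 0;
}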
@ -908,12 +976,27 @@ DCS::shared_ptr DCS::Create(double c, const ReweightScheme reweight) {
// L2WithDeadZone
/* ************************************************************************* */
L2WithDeadZone::L2WithDeadZone(double k, const ReweightScheme reweight)
: Base(reweight), k_(k) {
if (k_ <= 0) {
throw runtime_error("mEstimator L2WithDeadZone takes only positive double in constructor.");
}
}
double L2WithDeadZone::weight(double error) const {
// note that this code is slightly uglier than residual, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) in residual.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
double L2WithDeadZone::residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
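/* ************************************************************************* */
// Editor's verification sketch (not part of the diff): the three weight cases
// and the two residual cases above agree through the same identity
// rho'(x) = w(x) * x; a standalone check with hypothetical local helpers:
#include <cassert>
#include <cmath>
static double dzWeight(double x, double k) {
  if (std::abs(x) <= k) return 0.0;
  return (x > k) ? (-k + x) / x : (k + x) / x;
}
static double dzResidual(double x, double k) {
  const double a = std::abs(x);
  return (a < k) ? 0.0 : 0.5 * (k - a) * (k - a);
}
int main() {
  const double k = 1.0, h = 1e-6;
  for (double x : {0.3, 2.0, -2.5}) {
    const double dRho = (dzResidual(x + h, k) - dzResidual(x - h, k)) / (2 * h);
    assert(std::abs(dRho - dzWeight(x, k) * x) < 1e-5);  // rho'(x) == w(x)*x
  }
  return 0;
}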
void L2WithDeadZone::print(const std::string &s="") const {
std::cout << s << ": L2WithDeadZone (" << k_ << ")" << std::endl;
}


@ -635,9 +635,9 @@ namespace gtsam {
* To illustrate, let's consider the least-squares (L2), L1, and Huber estimators as examples:
*
* Name Symbol Least-Squares L1-norm Huber
* Residual \rho(x) 0.5*x^2 |x| 0.5*x^2 if |x|<k, 0.5*k^2 + k|x-k| otherwise
* Derivative \phi(x) x sgn(x) x if |x|<k, k sgn(x) otherwise
* Weight w(x)=\phi(x)/x 1 1/|x| 1 if |x|<k, k/|x| otherwise
*
* With these definitions, D(\rho(x), p) = \phi(x) D(x,p) = w(x) x D(x,p) = w(x) D(L2(x), p),
* and hence we can solve the equivalent weighted least squares problem \sum w(r_i) \rho(r_i)
@ -725,10 +725,11 @@ namespace gtsam {
typedef boost::shared_ptr<Null> shared_ptr;
Null(const ReweightScheme reweight = Block) : Base(reweight) {}
~Null() {}
double weight(double /*error*/) const { return 1.0; }
double residual(double error) const { return error; }
void print(const std::string &s) const;
bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
static shared_ptr Create() ;
private:
@ -749,11 +750,10 @@ namespace gtsam {
typedef boost::shared_ptr<Fair> shared_ptr;
Fair(double c = 1.3998, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double c, const ReweightScheme reweight = Block) ;
private:
@ -775,12 +775,10 @@ namespace gtsam {
typedef boost::shared_ptr<Huber> shared_ptr;
Huber(double k = 1.345, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
@ -806,11 +804,10 @@ namespace gtsam {
typedef boost::shared_ptr<Cauchy> shared_ptr;
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
double weight(double error) const {
return ksquared_ / (ksquared_ + error*error);
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
@ -832,15 +829,10 @@ namespace gtsam {
typedef boost::shared_ptr<Tukey> shared_ptr;
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
double weight(double error) const {
if (std::abs(error) <= c_) {
double xc2 = error*error/csquared_;
return (1.0-xc2)*(1.0-xc2);
}
return 0.0;
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
@ -862,12 +854,10 @@ namespace gtsam {
typedef boost::shared_ptr<Welsch> shared_ptr;
Welsch(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const {
double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
@ -885,31 +875,7 @@ namespace gtsam {
// Welsh implements the "Welsch" robust error model (Zhang97ivc)
// This was misspelled in previous versions of gtsam and should be
// removed in the future.
class GTSAM_EXPORT Welsh : public Base {
protected:
double c_, csquared_;
public:
typedef boost::shared_ptr<Welsh> shared_ptr;
Welsh(double c = 2.9846, const ReweightScheme reweight = Block);
double weight(double error) const {
double xc2 = (error*error)/csquared_;
return std::exp(-xc2);
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar & BOOST_SERIALIZATION_NVP(c_);
}
};
using Welsh = Welsch;
#endif
/// GemanMcClure implements the "Geman-McClure" robust error model
@ -923,10 +889,11 @@ namespace gtsam {
typedef boost::shared_ptr<GemanMcClure> shared_ptr;
GemanMcClure(double c = 1.0, const ReweightScheme reweight = Block);
virtual ~GemanMcClure() {}
virtual double weight(double error) const;
virtual void print(const std::string &s) const;
virtual bool equals(const Base& expected, double tol=1e-8) const;
~GemanMcClure() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol=1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
@ -952,10 +919,11 @@ namespace gtsam {
typedef boost::shared_ptr<DCS> shared_ptr;
DCS(double c = 1.0, const ReweightScheme reweight = Block);
virtual ~DCS() {}
virtual double weight(double error) const;
virtual void print(const std::string &s) const;
virtual bool equals(const Base& expected, double tol=1e-8) const;
~DCS() {}
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
protected:
@ -983,21 +951,11 @@ namespace gtsam {
public:
typedef boost::shared_ptr<L2WithDeadZone> shared_ptr;
L2WithDeadZone(double k, const ReweightScheme reweight = Block);
double residual(double error) const {
const double abs_error = std::abs(error);
return (abs_error < k_) ? 0.0 : 0.5*(k_-abs_error)*(k_-abs_error);
}
double weight(double error) const {
// note that this code is slightly uglier than above, because there are three distinct
// cases to handle (left of deadzone, deadzone, right of deadzone) instead of the two
// cases (deadzone, non-deadzone) above.
if (std::abs(error) <= k_) return 0.0;
else if (error > k_) return (-k_+error)/error;
else return (k_+error)/error;
}
void print(const std::string &s) const;
bool equals(const Base& expected, double tol=1e-8) const;
L2WithDeadZone(double k = 1.0, const ReweightScheme reweight = Block);
double weight(double error) const override;
double residual(double error) const override;
void print(const std::string &s) const override;
bool equals(const Base& expected, double tol = 1e-8) const override;
static shared_ptr Create(double k, const ReweightScheme reweight = Block);
private:

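To see how these m-estimators are consumed, a hedged usage sketch (not part of this changeset) wrapping one in a robust noise model via the existing noiseModel::Robust API; the Tukey constant 4.6851 and sigma 0.1 are illustrative only:

#include <gtsam/linear/NoiseModel.h>

int main() {
  using namespace gtsam;
  auto base = noiseModel::Isotropic::Sigma(2, 0.1);
  auto robust = noiseModel::Robust::Create(
      noiseModel::mEstimator::Tukey::Create(4.6851), base);
  // robust can now be passed wherever a SharedNoiseModel is expected.
  return 0;
}
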
View File

@ -77,11 +77,17 @@ public:
/// Construct from a GaussianFactorGraph
RegularHessianFactor(const GaussianFactorGraph& factors,
boost::optional<const Scatter&> scatter = boost::none)
const Scatter& scatter)
: HessianFactor(factors, scatter) {
checkInvariants();
}
/// Construct from a GaussianFactorGraph
RegularHessianFactor(const GaussianFactorGraph& factors)
: HessianFactor(factors) {
checkInvariants();
}
private:
/// Check invariants after construction

View File

@ -33,16 +33,16 @@ string SlotEntry::toString() const {
return oss.str();
}
Scatter::Scatter(const GaussianFactorGraph& gfg) : Scatter(gfg, Ordering()) {}
/* ************************************************************************* */
Scatter::Scatter(const GaussianFactorGraph& gfg,
boost::optional<const Ordering&> ordering) {
const Ordering& ordering) {
gttic(Scatter_Constructor);
// If we have an ordering, pre-fill the ordered variables first
if (ordering) {
for (Key key : *ordering) {
add(key, 0);
}
for (Key key : ordering) {
add(key, 0);
}
// Now, find dimensions of variables and/or extend
@ -68,7 +68,7 @@ Scatter::Scatter(const GaussianFactorGraph& gfg,
// To keep the same behavior as before, sort the keys after the ordering
iterator first = begin();
if (ordering) first += ordering->size();
first += ordering.size();
if (first != end()) std::sort(first, end());
}

View File

@ -23,8 +23,6 @@
#include <gtsam/base/FastMap.h>
#include <gtsam/dllexport.h>
#include <boost/optional.hpp>
namespace gtsam {
class GaussianFactorGraph;
@ -53,15 +51,16 @@ class Scatter : public FastVector<SlotEntry> {
/// Default Constructor
Scatter() {}
/// Construct from gaussian factor graph, with optional (partial or complete) ordering
Scatter(const GaussianFactorGraph& gfg,
boost::optional<const Ordering&> ordering = boost::none);
/// Construct from gaussian factor graph, without ordering
explicit Scatter(const GaussianFactorGraph& gfg);
/// Construct from gaussian factor graph, with (partial or complete) ordering
explicit Scatter(const GaussianFactorGraph& gfg, const Ordering& ordering);
/// Add a key/dim pair
void add(Key key, size_t dim);
private:
/// Find the SlotEntry with the right key (linear time worst case)
iterator find(Key key);
};

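A short sketch (not part of this changeset) of the two new explicit constructors; the toy two-variable graph is invented for illustration:

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/Scatter.h>

int main() {
  using namespace gtsam;
  GaussianFactorGraph gfg;
  gfg.add(0, I_2x2, Vector2(1.0, 0.0), noiseModel::Unit::Create(2));
  gfg.add(0, -I_2x2, 1, I_2x2, Vector2(0.0, 0.0), noiseModel::Unit::Create(2));
  Scatter natural(gfg);            // slots end up in sorted key order
  Ordering ordering;
  ordering.push_back(1);
  ordering.push_back(0);
  Scatter custom(gfg, ordering);   // key 1 gets the first slot
  return 0;
}
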
View File

@ -457,6 +457,22 @@ TEST(NoiseModel, WhitenInPlace)
* penalties using iteratively re-weighted least squares.
*/
TEST(NoiseModel, robustFunctionFair)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Fair::shared_ptr fair = mEstimator::Fair::Create(k);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error1), 1e-8);
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.3333333333333333, fair->weight(error3), 1e-8);
DOUBLES_EQUAL(0.8333333333333333, fair->weight(error4), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error1), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error2), 1e-8);
DOUBLES_EQUAL(22.534692783297260, fair->residual(error3), 1e-8);
DOUBLES_EQUAL(0.441961080151135, fair->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionHuber)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
@ -466,26 +482,86 @@ TEST(NoiseModel, robustFunctionHuber)
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.5, huber->weight(error3), 1e-8);
DOUBLES_EQUAL(1.0, huber->weight(error4), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error1), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error2), 1e-8);
DOUBLES_EQUAL(37.5000, huber->residual(error3), 1e-8);
DOUBLES_EQUAL(0.5000, huber->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionCauchy)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Cauchy::shared_ptr cauchy = mEstimator::Cauchy::Create(k);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error1), 1e-8);
DOUBLES_EQUAL(0.2000, cauchy->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.2000, cauchy->weight(error3), 1e-8);
DOUBLES_EQUAL(0.961538461538461, cauchy->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error1), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error2), 1e-8);
DOUBLES_EQUAL(20.117973905426254, cauchy->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490258914416017, cauchy->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionGemanMcClure)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0;
const double k = 1.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::GemanMcClure::shared_ptr gmc = mEstimator::GemanMcClure::Create(k);
const double weight1 = gmc->weight(error1),
weight2 = gmc->weight(error2);
DOUBLES_EQUAL(0.25 , weight1, 1e-8);
DOUBLES_EQUAL(9.80296e-5, weight2, 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error1), 1e-8);
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error2), 1e-8);
DOUBLES_EQUAL(9.80296e-5, gmc->weight(error3), 1e-8);
DOUBLES_EQUAL(0.25 , gmc->weight(error4), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error1), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error2), 1e-8);
DOUBLES_EQUAL(0.495049504950495, gmc->residual(error3), 1e-8);
DOUBLES_EQUAL(0.2500, gmc->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionWelsch)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Welsch::shared_ptr welsch = mEstimator::Welsch::Create(k);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error1), 1e-8);
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.018315638888734, welsch->weight(error3), 1e-8);
DOUBLES_EQUAL(0.960789439152323, welsch->weight(error4), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error1), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error2), 1e-8);
DOUBLES_EQUAL(12.271054513890823, welsch->residual(error3), 1e-8);
DOUBLES_EQUAL(0.490132010595960, welsch->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionTukey)
{
const double k = 5.0, error1 = 1.0, error2 = 10.0, error3 = -10.0, error4 = -1.0;
const mEstimator::Tukey::shared_ptr tukey = mEstimator::Tukey::Create(k);
DOUBLES_EQUAL(0.9216, tukey->weight(error1), 1e-8);
DOUBLES_EQUAL(0.0, tukey->weight(error2), 1e-8);
// Test negative value to ensure we take absolute value of error.
DOUBLES_EQUAL(0.0, tukey->weight(error3), 1e-8);
DOUBLES_EQUAL(0.9216, tukey->weight(error4), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error1), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error2), 1e-8);
DOUBLES_EQUAL(4.166666666666667, tukey->residual(error3), 1e-8);
DOUBLES_EQUAL(0.480266666666667, tukey->residual(error4), 1e-8);
}
TEST(NoiseModel, robustFunctionDCS)
{
const double k = 1.0, error1 = 1.0, error2 = 10.0;
const mEstimator::DCS::shared_ptr dcs = mEstimator::DCS::Create(k);
const double weight1 = dcs->weight(error1),
weight2 = dcs->weight(error2);
DOUBLES_EQUAL(1.0 , weight1, 1e-8);
DOUBLES_EQUAL(0.00039211, weight2, 1e-8);
DOUBLES_EQUAL(1.0 , dcs->weight(error1), 1e-8);
DOUBLES_EQUAL(0.00039211, dcs->weight(error2), 1e-8);
DOUBLES_EQUAL(0.5 , dcs->residual(error1), 1e-8);
DOUBLES_EQUAL(0.9900990099, dcs->residual(error2), 1e-8);
}
TEST(NoiseModel, robustFunctionL2WithDeadZone)

View File

@ -1,7 +1,6 @@
// This causes Doxygen to find classes inside the gtsam namespace without
// explicitly specifying it when writing class names.
namespace gtsam {
/**
\mainpage GTSAM
@ -17,11 +16,10 @@ To use GTSAM to solve your own problems, you will often have to create new facto
-# The number of variables your factor involves is <b>unknown</b> at compile time - derive from NoiseModelFactor and implement NoiseModelFactor::unwhitenedError()
- This is a factor expressing the sum-of-squares error between a measurement \f$ z \f$ and a measurement prediction function \f$ h(x) \f$, on which the errors are expected to follow some distribution specified by a noise model (see noiseModel).
-# The number of variables your factor involves is <b>known</b> at compile time and is between 1 and 6 - derive from NonlinearFactor1, NonlinearFactor2, NonlinearFactor3, NonlinearFactor4, NonlinearFactor5, or NonlinearFactor6, and implement <b>\c evaluateError()</b>
-# The number of variables your factor involves is <b>known</b> at compile time and is between 1 and 6 - derive from NoiseModelFactor1, NoiseModelFactor2, NoiseModelFactor3, NoiseModelFactor4, NoiseModelFactor5, or NoiseModelFactor6, and implement <b>\c evaluateError()</b>
- This factor expresses the same sum-of-squares error with a noise model, but makes the implementation task slightly easier than with %NoiseModelFactor.
-# Derive from NonlinearFactor
- This is more advanced and allows creating factors without an explicit noise model, or that linearize to HessianFactor instead of JacobianFactor.
*/
}
}

View File

@ -35,7 +35,7 @@ class AttitudeFactor {
protected:
const Unit3 nZ_, bRef_; ///< Position measurement in
Unit3 nZ_, bRef_; ///< Position measurement in
public:
@ -56,12 +56,19 @@ public:
Vector attitudeError(const Rot3& p,
OptionalJacobian<2,3> H = boost::none) const;
const Unit3& nZ() const {
return nZ_;
}
const Unit3& bRef() const {
return bRef_;
}
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int /*version*/) {
ar & boost::serialization::make_nvp("nZ_", const_cast<Unit3&>(nZ_));
ar & boost::serialization::make_nvp("bRef_", const_cast<Unit3&>(bRef_));
ar & boost::serialization::make_nvp("nZ_", nZ_);
ar & boost::serialization::make_nvp("bRef_", bRef_);
}
};
@ -118,12 +125,6 @@ public:
boost::optional<Matrix&> H = boost::none) const {
return attitudeError(nRb, H);
}
Unit3 nZ() const {
return nZ_;
}
Unit3 bRef() const {
return bRef_;
}
private:
@ -204,12 +205,6 @@ public:
}
return e;
}
Unit3 nZ() const {
return nZ_;
}
Unit3 bRef() const {
return bRef_;
}
private:

View File

@ -54,6 +54,56 @@ typedef ManifoldPreintegration PreintegrationType;
* Robotics: Science and Systems (RSS), 2015.
*/
/// Parameters for pre-integration using PreintegratedCombinedMeasurements:
/// Usage: Create just a single Params and pass a shared pointer to the constructor
struct GTSAM_EXPORT PreintegrationCombinedParams : PreintegrationParams {
Matrix3 biasAccCovariance; ///< continuous-time "Covariance" describing accelerometer bias random walk
Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk
Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration
/// See two named constructors below for good values of n_gravity in body frame
PreintegrationCombinedParams(const Vector3& n_gravity) :
PreintegrationParams(n_gravity), biasAccCovariance(I_3x3), biasOmegaCovariance(
I_3x3), biasAccOmegaInt(I_6x6) {
}
// Default Params for a Z-down navigation frame, such as NED: gravity points along positive Z-axis
static boost::shared_ptr<PreintegrationCombinedParams> MakeSharedD(double g = 9.81) {
return boost::shared_ptr<PreintegrationCombinedParams>(new PreintegrationCombinedParams(Vector3(0, 0, g)));
}
// Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis
static boost::shared_ptr<PreintegrationCombinedParams> MakeSharedU(double g = 9.81) {
return boost::shared_ptr<PreintegrationCombinedParams>(new PreintegrationCombinedParams(Vector3(0, 0, -g)));
}
void setBiasAccCovariance(const Matrix3& cov) { biasAccCovariance=cov; }
void setBiasOmegaCovariance(const Matrix3& cov) { biasOmegaCovariance=cov; }
void setBiasAccOmegaInt(const Matrix6& cov) { biasAccOmegaInt=cov; }
const Matrix3& getBiasAccCovariance() const { return biasAccCovariance; }
const Matrix3& getBiasOmegaCovariance() const { return biasOmegaCovariance; }
const Matrix6& getBiasAccOmegaInt() const { return biasAccOmegaInt; }
private:
/// Default constructor makes uninitialized params struct
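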
PreintegrationCombinedParams() {}
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotation::Params);
ar& BOOST_SERIALIZATION_NVP(biasAccCovariance);
ar& BOOST_SERIALIZATION_NVP(biasOmegaCovariance);
ar& BOOST_SERIALIZATION_NVP(biasAccOmegaInt);
}
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
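
A hedged construction sketch (not part of this changeset); the gravity magnitude and the bias random-walk value are illustrative only:

#include <gtsam/navigation/CombinedImuFactor.h>

int main() {
  using namespace gtsam;
  // ENU convention: gravity points along negative Z
  auto params = PreintegrationCombinedParams::MakeSharedU(9.81);
  params->setBiasOmegaCovariance(I_3x3 * 1e-5);
  PreintegratedCombinedMeasurements pim(params);
  return 0;
}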
/**
* PreintegratedCombinedMeasurements integrates the IMU measurements
* (rotation rates and accelerations) and the corresponding covariance matrix.
@ -67,47 +117,7 @@ typedef ManifoldPreintegration PreintegrationType;
class GTSAM_EXPORT PreintegratedCombinedMeasurements : public PreintegrationType {
public:
/// Parameters for pre-integration:
/// Usage: Create just a single Params and pass a shared pointer to the constructor
struct Params : PreintegrationParams {
Matrix3 biasAccCovariance; ///< continuous-time "Covariance" describing accelerometer bias random walk
Matrix3 biasOmegaCovariance; ///< continuous-time "Covariance" describing gyroscope bias random walk
Matrix6 biasAccOmegaInt; ///< covariance of bias used for pre-integration
/// See two named constructors below for good values of n_gravity in body frame
Params(const Vector3& n_gravity) :
PreintegrationParams(n_gravity), biasAccCovariance(I_3x3), biasOmegaCovariance(
I_3x3), biasAccOmegaInt(I_6x6) {
}
// Default Params for a Z-down navigation frame, such as NED: gravity points along positive Z-axis
static boost::shared_ptr<Params> MakeSharedD(double g = 9.81) {
return boost::shared_ptr<Params>(new Params(Vector3(0, 0, g)));
}
// Default Params for a Z-up navigation frame, such as ENU: gravity points along negative Z-axis
static boost::shared_ptr<Params> MakeSharedU(double g = 9.81) {
return boost::shared_ptr<Params>(new Params(Vector3(0, 0, -g)));
}
private:
/// Default constructor makes uninitialized params struct
Params() {}
/** Serialization function */
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(PreintegratedRotation::Params);
ar& BOOST_SERIALIZATION_NVP(biasAccCovariance);
ar& BOOST_SERIALIZATION_NVP(biasOmegaCovariance);
ar& BOOST_SERIALIZATION_NVP(biasAccOmegaInt);
}
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
typedef PreintegrationCombinedParams Params;
protected:
/* Covariance matrix of the preintegrated measurements

View File

@ -75,6 +75,23 @@ TEST(Rot3AttitudeFactor, Serialization) {
EXPECT(serializationTestHelpers::equalsBinary(factor));
}
/* ************************************************************************* */
TEST(Rot3AttitudeFactor, CopyAndMove) {
Unit3 nDown(0, 0, -1);
SharedNoiseModel model = noiseModel::Isotropic::Sigma(2, 0.25);
Rot3AttitudeFactor factor(0, nDown, model);
// Copy assignable.
EXPECT(std::is_copy_assignable<Rot3AttitudeFactor>::value);
Rot3AttitudeFactor factor_copied = factor;
EXPECT(assert_equal(factor, factor_copied));
// Move assignable.
EXPECT(std::is_move_assignable<Rot3AttitudeFactor>::value);
Rot3AttitudeFactor factor_moved = std::move(factor_copied);
EXPECT(assert_equal(factor, factor_moved));
}
// *************************************************************************
TEST( Pose3AttitudeFactor, Constructor ) {
@ -119,6 +136,23 @@ TEST(Pose3AttitudeFactor, Serialization) {
EXPECT(serializationTestHelpers::equalsBinary(factor));
}
/* ************************************************************************* */
TEST(Pose3AttitudeFactor, CopyAndMove) {
Unit3 nDown(0, 0, -1);
SharedNoiseModel model = noiseModel::Isotropic::Sigma(2, 0.25);
Pose3AttitudeFactor factor(0, nDown, model);
// Copy assignable.
EXPECT(std::is_copy_assignable<Pose3AttitudeFactor>::value);
Pose3AttitudeFactor factor_copied = factor;
EXPECT(assert_equal(factor, factor_copied));
// Move assignable.
EXPECT(std::is_move_assignable<Pose3AttitudeFactor>::value);
Pose3AttitudeFactor factor_moved = std::move(factor_copied);
EXPECT(assert_equal(factor, factor_moved));
}
// *************************************************************************
int main() {
TestResult tr;

View File

@ -26,19 +26,70 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
EliminateableFactorGraph<GaussianFactorGraph>::OptionalOrdering ordering)
{
Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization)
: values_(solution), factorization_(factorization) {
gttic(MarginalsConstructor);
// Linearize graph
graph_ = *graph.linearize(solution);
computeBayesTree();
}
// Store values
values_ = solution;
/* ************************************************************************* */
Marginals::Marginals(const NonlinearFactorGraph& graph, const Values& solution, const Ordering& ordering,
Factorization factorization)
: values_(solution), factorization_(factorization) {
gttic(MarginalsConstructor);
graph_ = *graph.linearize(solution);
computeBayesTree(ordering);
}
/* ************************************************************************* */
Marginals::Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization)
: graph_(graph), values_(solution), factorization_(factorization) {
gttic(MarginalsConstructor);
computeBayesTree();
}
/* ************************************************************************* */
Marginals::Marginals(const GaussianFactorGraph& graph, const Values& solution, const Ordering& ordering,
Factorization factorization)
: graph_(graph), values_(solution), factorization_(factorization) {
gttic(MarginalsConstructor);
computeBayesTree(ordering);
}
/* ************************************************************************* */
Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization)
: graph_(graph), factorization_(factorization) {
gttic(MarginalsConstructor);
for (const auto& keyValue: solution) {
values_.insert(keyValue.first, keyValue.second);
}
computeBayesTree();
}
/* ************************************************************************* */
Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, const Ordering& ordering,
Factorization factorization)
: graph_(graph), factorization_(factorization) {
gttic(MarginalsConstructor);
for (const auto& keyValue: solution) {
values_.insert(keyValue.first, keyValue.second);
}
computeBayesTree(ordering);
}
/* ************************************************************************* */
void Marginals::computeBayesTree() {
// Compute BayesTree
if(factorization_ == CHOLESKY)
bayesTree_ = *graph_.eliminateMultifrontal(EliminatePreferCholesky);
else if(factorization_ == QR)
bayesTree_ = *graph_.eliminateMultifrontal(EliminateQR);
}
/* ************************************************************************* */
void Marginals::computeBayesTree(const Ordering& ordering) {
// Compute BayesTree
factorization_ = factorization;
if(factorization_ == CHOLESKY)
bayesTree_ = *graph_.eliminateMultifrontal(ordering, EliminatePreferCholesky);
else if(factorization_ == QR)
@ -109,9 +160,9 @@ JointMarginal Marginals::jointMarginalInformation(const KeyVector& variables) co
jointFG = *bayesTree_.joint(variables[0], variables[1], EliminateQR);
} else {
if(factorization_ == CHOLESKY)
jointFG = GaussianFactorGraph(*graph_.marginalMultifrontalBayesTree(variables, boost::none, EliminatePreferCholesky));
jointFG = GaussianFactorGraph(*graph_.marginalMultifrontalBayesTree(variables, EliminatePreferCholesky));
else if(factorization_ == QR)
jointFG = GaussianFactorGraph(*graph_.marginalMultifrontalBayesTree(variables, boost::none, EliminateQR));
jointFG = GaussianFactorGraph(*graph_.marginalMultifrontalBayesTree(variables, EliminateQR));
}
// Get information matrix
@ -125,7 +176,7 @@ JointMarginal Marginals::jointMarginalInformation(const KeyVector& variables) co
// Get dimensions from factor graph
std::vector<size_t> dims;
dims.reserve(variablesSorted.size());
for(Key key: variablesSorted) {
for(const auto& key: variablesSorted) {
dims.push_back(values_.at(key).dim());
}
@ -142,7 +193,7 @@ VectorValues Marginals::optimize() const {
void JointMarginal::print(const std::string& s, const KeyFormatter& formatter) const {
cout << s << "Joint marginal on keys ";
bool first = true;
for(Key key: keys_) {
for(const auto& key: keys_) {
if(!first)
cout << ", ";
else

View File

@ -51,14 +51,54 @@ public:
/// Default constructor only for Cython wrapper
Marginals(){}
/** Construct a marginals class.
/** Construct a marginals class from a nonlinear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The linearization point about which to compute Gaussian marginals (usually the MLE as obtained from a NonlinearOptimizer).
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
* @param ordering An optional variable ordering for elimination.
*/
Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY,
EliminateableFactorGraph<GaussianFactorGraph>::OptionalOrdering ordering = boost::none);
Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY);
/** Construct a marginals class from a nonlinear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The linearization point about which to compute Gaussian marginals (usually the MLE as obtained from a NonlinearOptimizer).
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
* @param ordering The ordering for elimination.
*/
Marginals(const NonlinearFactorGraph& graph, const Values& solution, const Ordering& ordering,
Factorization factorization = CHOLESKY);
/** Construct a marginals class from a linear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The solution point to compute Gaussian marginals.
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
*/
Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization = CHOLESKY);
/** Construct a marginals class from a linear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The solution point to compute Gaussian marginals.
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
* @param ordering The ordering for elimination.
*/
Marginals(const GaussianFactorGraph& graph, const Values& solution, const Ordering& ordering,
Factorization factorization = CHOLESKY);
/** Construct a marginals class from a linear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The solution point to compute Gaussian marginals.
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
*/
Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization = CHOLESKY);
/** Construct a marginals class from a linear factor graph.
* @param graph The factor graph defining the full joint density on all variables.
* @param solution The solution point to compute Gaussian marginals.
* @param factorization The linear decomposition mode - either Marginals::CHOLESKY (faster and suitable for most problems) or Marginals::QR (slower but more numerically stable for poorly-conditioned problems).
* @param ordering The ordering for elimination.
*/
Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, const Ordering& ordering,
Factorization factorization = CHOLESKY);
/** print */
void print(const std::string& str = "Marginals: ", const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
@ -81,6 +121,28 @@ public:
/** Optimize the bayes tree */
VectorValues optimize() const;
protected:
/** Compute the Bayes Tree as a helper function to the constructor */
void computeBayesTree();
/** Compute the Bayes Tree as a helper function to the constructor */
void computeBayesTree(const Ordering& ordering);
public:
/** \deprecated argument order changed due to removing boost::optional<Ordering> */
Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
/** \deprecated argument order changed due to removing boost::optional<Ordering> */
Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization,
const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
/** \deprecated argument order changed due to removing boost::optional<Ordering> */
Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization,
const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
};
/**

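Putting the new constructor set together, a usage sketch (not part of this changeset); the one-factor graph is invented for illustration:

#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Marginals.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Point2.h>

int main() {
  using namespace gtsam;
  NonlinearFactorGraph graph;
  Key x1 = 1;
  graph.emplace_shared<PriorFactor<Point2> >(x1, Point2(0, 0),
                                             noiseModel::Isotropic::Sigma(2, 0.5));
  Values solution;
  solution.insert(x1, Point2(0, 0));
  // Default: COLAMD decides the elimination order.
  Marginals m1(graph, solution);
  // New signature: an explicit ordering now precedes the factorization mode.
  Ordering ordering = Ordering::Colamd(graph);
  Marginals m2(graph, solution, ordering, Marginals::QR);
  Matrix cov = m2.marginalCovariance(x1);
  return 0;
}
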
View File

@ -344,23 +344,31 @@ GaussianFactorGraph::shared_ptr NonlinearFactorGraph::linearize(const Values& li
}
/* ************************************************************************* */
static Scatter scatterFromValues(const Values& values, boost::optional<Ordering&> ordering) {
static Scatter scatterFromValues(const Values& values) {
gttic(scatterFromValues);
Scatter scatter;
scatter.reserve(values.size());
if (!ordering) {
// use "natural" ordering with keys taken from the initial values
for (const auto& key_value : values) {
scatter.add(key_value.key, key_value.value.dim());
}
} else {
// copy ordering into keys and lookup dimension in values, is O(n*log n)
for (Key key : *ordering) {
const Value& value = values.at(key);
scatter.add(key, value.dim());
}
// use "natural" ordering with keys taken from the initial values
for (const auto& key_value : values) {
scatter.add(key_value.key, key_value.value.dim());
}
return scatter;
}
/* ************************************************************************* */
static Scatter scatterFromValues(const Values& values, const Ordering& ordering) {
gttic(scatterFromValues);
Scatter scatter;
scatter.reserve(values.size());
// copy ordering into keys and lookup dimension in values, is O(n*log n)
for (Key key : ordering) {
const Value& value = values.at(key);
scatter.add(key, value.dim());
}
return scatter;
@ -368,11 +376,7 @@ static Scatter scatterFromValues(const Values& values, boost::optional<Ordering&
/* ************************************************************************* */
HessianFactor::shared_ptr NonlinearFactorGraph::linearizeToHessianFactor(
const Values& values, boost::optional<Ordering&> ordering, const Dampen& dampen) const {
gttic(NonlinearFactorGraph_linearizeToHessianFactor);
Scatter scatter = scatterFromValues(values, ordering);
const Values& values, const Scatter& scatter, const Dampen& dampen) const {
// NOTE(frank): we are heavily leaning on friendship below
HessianFactor::shared_ptr hessianFactor(new HessianFactor(scatter));
@ -393,9 +397,36 @@ HessianFactor::shared_ptr NonlinearFactorGraph::linearizeToHessianFactor(
return hessianFactor;
}
/* ************************************************************************* */
HessianFactor::shared_ptr NonlinearFactorGraph::linearizeToHessianFactor(
const Values& values, const Ordering& order, const Dampen& dampen) const {
gttic(NonlinearFactorGraph_linearizeToHessianFactor);
Scatter scatter = scatterFromValues(values, order);
return linearizeToHessianFactor(values, scatter, dampen);
}
/* ************************************************************************* */
HessianFactor::shared_ptr NonlinearFactorGraph::linearizeToHessianFactor(
const Values& values, const Dampen& dampen) const {
gttic(NonlinearFactorGraph_linearizeToHessianFactor);
Scatter scatter = scatterFromValues(values);
return linearizeToHessianFactor(values, scatter, dampen);
}
/* ************************************************************************* */
Values NonlinearFactorGraph::updateCholesky(const Values& values,
boost::optional<Ordering&> ordering,
const Dampen& dampen) const {
gttic(NonlinearFactorGraph_updateCholesky);
auto hessianFactor = linearizeToHessianFactor(values, dampen);
VectorValues delta = hessianFactor->solve();
return values.retract(delta);
}
/* ************************************************************************* */
Values NonlinearFactorGraph::updateCholesky(const Values& values,
const Ordering& ordering,
const Dampen& dampen) const {
gttic(NonlinearFactorGraph_updateCholesky);
auto hessianFactor = linearizeToHessianFactor(values, ordering, dampen);

View File

@ -149,17 +149,31 @@ namespace gtsam {
* Instead of producing a GaussianFactorGraph, pre-allocate and linearize directly
* into a HessianFactor. Avoids the many mallocs and pointer indirection in constructing
* a new graph, and hence useful in case a dense solve is appropriate for your problem.
* An optional ordering can be given that still decides how the Hessian is laid out.
* An optional lambda function can be used to apply damping on the filled Hessian.
* No parallelism is exploited, because all the factors write in the same memory.
*/
boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
const Values& values, boost::optional<Ordering&> ordering = boost::none,
const Dampen& dampen = nullptr) const;
const Values& values, const Dampen& dampen = nullptr) const;
/**
* Instead of producing a GaussianFactorGraph, pre-allocate and linearize directly
* into a HessianFactor. Avoids the many mallocs and pointer indirection in constructing
* a new graph, and hence useful in case a dense solve is appropriate for your problem.
* An ordering is given that still decides how the Hessian is laid out.
* An optional lambda function can be used to apply damping on the filled Hessian.
* No parallelism is exploited, because all the factors write in the same memory.
*/
boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
const Values& values, const Ordering& ordering, const Dampen& dampen = nullptr) const;
/// Linearize and solve in one pass.
/// Calls linearizeToHessianFactor, densely solves the normal equations, and updates the values.
Values updateCholesky(const Values& values, boost::optional<Ordering&> ordering = boost::none,
Values updateCholesky(const Values& values,
const Dampen& dampen = nullptr) const;
/// Linearize and solve in one pass.
/// Calls linearizeToHessianFactor, densely solves the normal equations, and updates the values.
Values updateCholesky(const Values& values, const Ordering& ordering,
const Dampen& dampen = nullptr) const;
/// Clone() performs a deep-copy of the graph, including all of the factors
@ -190,6 +204,13 @@ namespace gtsam {
private:
/**
* Linearize from Scatter rather than from Ordering. Made private because
* it doesn't include gttic.
*/
boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
const Values& values, const Scatter& scatter, const Dampen& dampen = nullptr) const;
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
@ -197,6 +218,19 @@ namespace gtsam {
ar & boost::serialization::make_nvp("NonlinearFactorGraph",
boost::serialization::base_object<Base>(*this));
}
public:
/** \deprecated */
boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
const Values& values, boost::none_t, const Dampen& dampen = nullptr) const
{return linearizeToHessianFactor(values, dampen);}
/** \deprecated */
Values updateCholesky(const Values& values, boost::none_t,
const Dampen& dampen = nullptr) const
{return updateCholesky(values, dampen);}
};
/// traits

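A brief sketch (not part of this changeset) of the split API; the one-factor graph is invented for illustration:

#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Point2.h>

int main() {
  using namespace gtsam;
  NonlinearFactorGraph graph;
  graph.emplace_shared<PriorFactor<Point2> >(0, Point2(1, 0),
                                             noiseModel::Isotropic::Sigma(2, 1.0));
  Values values;
  values.insert(0, Point2(0, 0));
  // Hessian laid out in the natural key order of the values.
  auto hessian = graph.linearizeToHessianFactor(values);
  // Or laid out according to an explicit ordering.
  auto ordered = graph.linearizeToHessianFactor(values, Ordering::Colamd(graph));
  // One pass: linearize, dense Cholesky solve, retract.
  Values updated = graph.updateCholesky(values);
  return 0;
}
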
View File

@ -101,7 +101,7 @@ void NonlinearOptimizer::defaultOptimize() {
cout << "newError: " << error() << endl;
} while (iterations() < params.maxIterations &&
!checkConvergence(params.relativeErrorTol, params.absoluteErrorTol, params.errorTol,
currentError, error(), params.verbosity));
currentError, error(), params.verbosity) && std::isfinite(currentError));
// Printing if verbose
if (params.verbosity >= NonlinearOptimizerParams::TERMINATION) {
@ -128,18 +128,22 @@ VectorValues NonlinearOptimizer::solve(const GaussianFactorGraph& gfg,
const NonlinearOptimizerParams& params) const {
// solution of linear solver is an update to the linearization point
VectorValues delta;
boost::optional<const Ordering&> optionalOrdering;
if (params.ordering)
optionalOrdering.reset(*params.ordering);
// Check which solver we are using
if (params.isMultifrontal()) {
// Multifrontal QR or Cholesky (decided by params.getEliminationFunction())
delta = gfg.optimize(optionalOrdering, params.getEliminationFunction());
if (params.ordering)
delta = gfg.optimize(*params.ordering, params.getEliminationFunction());
else
delta = gfg.optimize(params.getEliminationFunction());
} else if (params.isSequential()) {
// Sequential QR or Cholesky (decided by params.getEliminationFunction())
delta = gfg.eliminateSequential(optionalOrdering, params.getEliminationFunction(), boost::none,
params.orderingType)->optimize();
if (params.ordering)
delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction(),
boost::none, params.orderingType)->optimize();
else
delta = gfg.eliminateSequential(params.getEliminationFunction(), boost::none,
params.orderingType)->optimize();
} else if (params.isIterative()) {
// Conjugate Gradient -> needs params.iterativeParams
if (!params.iterativeParams)

View File

@ -82,7 +82,7 @@ public:
};
LinearSolverType linearSolverType; ///< The type of linear solver to use in the nonlinear optimizer
boost::optional<Ordering> ordering; ///< The variable elimination ordering, or empty to use COLAMD (default: empty)
boost::optional<Ordering> ordering; ///< The optional variable elimination ordering, or empty to use COLAMD (default: empty)
IterativeOptimizationParameters::shared_ptr iterativeParams; ///< The container for iterativeOptimization parameters. used in CG Solvers.
inline bool isMultifrontal() const {

View File

@ -39,6 +39,7 @@
#pragma GCC diagnostic pop
#endif
#include <boost/ptr_container/serialize_ptr_map.hpp>
#include <boost/shared_ptr.hpp>
#include <string>
#include <utility>

View File

@ -1,8 +1,7 @@
# Extra CMake definitions for GTSAM
set (GTSAM_VERSION_MAJOR @GTSAM_VERSION_MAJOR@)
set (GTSAM_VERSION_MINOR @GTSAM_VERSION_MINOR@)
set (GTSAM_VERSION_PATCH @GTSAM_VERSION_PATCH@)
# All version variables are handled by GTSAMConfigVersion.cmake, except these
# two below. We continue to set them here in case someone uses them.
set (GTSAM_VERSION_NUMERIC @GTSAM_VERSION_NUMERIC@)
set (GTSAM_VERSION_STRING "@GTSAM_VERSION_STRING@")

View File

@ -14,7 +14,14 @@ using namespace std;
namespace gtsam {
/// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment(OptionalOrdering ordering) const {
CSP::sharedValues CSP::optimalAssignment() const {
DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential();
sharedValues mpe = chordal->optimize();
return mpe;
}
/// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment(const Ordering& ordering) const {
DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering);
sharedValues mpe = chordal->optimize();
return mpe;

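A hypothetical sketch (not part of this changeset; the keys, cardinalities, and constraint are invented) showing both overloads:

#include <gtsam_unstable/discrete/CSP.h>

int main() {
  using namespace gtsam;
  DiscreteKey A(0, 3), B(1, 3);  // two variables, 3 possible values each
  CSP csp;
  csp.addAllDiff(A, B);          // constraint: A != B
  auto best = csp.optimalAssignment();          // let COLAMD pick the ordering
  Ordering ordering;
  ordering.push_back(0);
  ordering.push_back(1);
  auto best2 = csp.optimalAssignment(ordering); // explicit elimination order
  return 0;
}
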
View File

@ -60,7 +60,10 @@ namespace gtsam {
// }
/// Find the best total assignment - can be expensive
sharedValues optimalAssignment(OptionalOrdering ordering = boost::none) const;
sharedValues optimalAssignment() const;
/// Find the best total assignment - can be expensive
sharedValues optimalAssignment(const Ordering& ordering) const;
// /*
// * Perform loopy belief propagation

View File

@ -145,5 +145,19 @@ int main(int argc, char** argv) {
cout << setprecision(5) << " Key: " << key_timestamp.first << " Time: " << key_timestamp.second << endl;
}
// Here is an example of how to get the full Jacobian of the problem.
// First, get the linearization point.
Values result = smootherISAM2.calculateEstimate();
// Get the factor graph
auto &factorGraph = smootherISAM2.getFactors();
// Linearize to a Gaussian factor graph
boost::shared_ptr<GaussianFactorGraph> linearGraph = factorGraph.linearize(result);
// Converts the linear graph into a Jacobian factor and extracts the Jacobian matrix
Matrix jacobian = linearGraph->jacobian().first;
cout << " Jacobian: " << jacobian << endl;
return 0;
}

View File

@ -39,7 +39,7 @@ public:
/** default constructor */
IncrementalFixedLagSmoother(double smootherLag = 0.0,
const ISAM2Params& parameters = ISAM2Params()) :
const ISAM2Params& parameters = DefaultISAM2Params()) :
FixedLagSmoother(smootherLag), isam_(parameters) {
}
@ -114,6 +114,14 @@ public:
const ISAM2Result& getISAM2Result() const{ return isamResult_; }
protected:
/** Create default parameters */
static ISAM2Params DefaultISAM2Params() {
ISAM2Params params;
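// findUnusedFactorSlots lets iSAM2 refill the slots of removed factors, so the
// factor index does not grow unboundedly as the smoother marginalizes old states.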
params.findUnusedFactorSlots = true;
return params;
}
/** An iSAM2 object used to perform inference. The smoother lag is controlled
* by what factors are removed each iteration */
ISAM2 isam_;

View File

@ -46,21 +46,26 @@ class NonlinearClusterTree : public ClusterTree<NonlinearFactorGraph> {
// linearize local custer factors straight into hessianFactor, which is returned
// If no ordering given, uses colamd
HessianFactor::shared_ptr linearizeToHessianFactor(
const Values& values, boost::optional<Ordering> ordering = boost::none,
const Values& values,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
if (!ordering)
ordering.reset(Ordering::ColamdConstrainedFirst(factors, orderedFrontalKeys, true));
return factors.linearizeToHessianFactor(values, *ordering, dampen);
Ordering ordering;
ordering = Ordering::ColamdConstrainedFirst(factors, orderedFrontalKeys, true);
return factors.linearizeToHessianFactor(values, ordering, dampen);
}
// Recursively eliminate subtree rooted at this Cluster into a Bayes net and factor on parent
// linearize local cluster factors straight into hessianFactor, which is returned
// If no ordering given, uses colamd
HessianFactor::shared_ptr linearizeToHessianFactor(
const Values& values, const Ordering& ordering,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
return factors.linearizeToHessianFactor(values, ordering, dampen);
}
// Helper function: recursively eliminate subtree rooted at this Cluster into a Bayes net and factor on parent
// TODO(frank): Use TBB to support deep trees and parallelism
std::pair<GaussianBayesNet, HessianFactor::shared_ptr> linearizeAndEliminate(
const Values& values, boost::optional<Ordering> ordering = boost::none,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
// Linearize and create HessianFactor f(front,separator)
HessianFactor::shared_ptr localFactor = linearizeToHessianFactor(values, ordering, dampen);
const Values& values,
const HessianFactor::shared_ptr& localFactor) const {
// Get contributions f(front) from children, as well as p(children|front)
GaussianBayesNet bayesNet;
for (const auto& child : children) {
@ -72,12 +77,45 @@ class NonlinearClusterTree : public ClusterTree<NonlinearFactorGraph> {
return {bayesNet, localFactor};
}
// Recursively eliminate subtree rooted at this Cluster into a Bayes net and factor on parent
// TODO(frank): Use TBB to support deep trees and parallelism
std::pair<GaussianBayesNet, HessianFactor::shared_ptr> linearizeAndEliminate(
const Values& values,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
// Linearize and create HessianFactor f(front,separator)
HessianFactor::shared_ptr localFactor = linearizeToHessianFactor(values, dampen);
return linearizeAndEliminate(values, localFactor);
}
// Recursively eliminate subtree rooted at this Cluster into a Bayes net and factor on parent
// TODO(frank): Use TBB to support deep trees and parallelism
std::pair<GaussianBayesNet, HessianFactor::shared_ptr> linearizeAndEliminate(
const Values& values, const Ordering& ordering,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
// Linearize and create HessianFactor f(front,separator)
HessianFactor::shared_ptr localFactor = linearizeToHessianFactor(values, ordering, dampen);
return linearizeAndEliminate(values, localFactor);
}
// Recursively eliminate subtree rooted at this Cluster
// Version that updates existing Bayes net and returns a new Hessian factor on parent clique
// It is possible to pass in a nullptr for the bayesNet if only interested in the new factor
HessianFactor::shared_ptr linearizeAndEliminate(
const Values& values, GaussianBayesNet* bayesNet,
boost::optional<Ordering> ordering = boost::none,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
auto bayesNet_newFactor_pair = linearizeAndEliminate(values, dampen);
if (bayesNet) {
bayesNet->push_back(bayesNet_newFactor_pair.first);
}
return bayesNet_newFactor_pair.second;
}
// Recursively eliminate subtree rooted at this Cluster
// Version that updates existing Bayes net and returns a new Hessian factor on parent clique
// It is possible to pass in a nullptr for the bayesNet if only interested in the new factor
HessianFactor::shared_ptr linearizeAndEliminate(
const Values& values, GaussianBayesNet* bayesNet,
const Ordering& ordering,
const NonlinearFactorGraph::Dampen& dampen = nullptr) const {
auto bayesNet_newFactor_pair = linearizeAndEliminate(values, ordering, dampen);
if (bayesNet) {

View File

@ -0,0 +1,73 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% GTSAM Copyright 2010, Georgia Tech Research Corporation,
% Atlanta, Georgia 30332-0415
% All Rights Reserved
% Authors: Frank Dellaert, et al. (see THANKS for the full author list)
%
% See LICENSE for the license information
%
% @brief Plot visualizations of residuals, residual derivatives, and weights for the various mEstimators.
% @author Varun Agrawal
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% import gtsam.*
close all;
x = linspace(-10, 10, 1000);
%% Define all the mEstimator models and plot them
c = 1.3998;
fairNoiseModel = gtsam.noiseModel.mEstimator.Fair(c);
plot_m_estimator(x, fairNoiseModel, 'Fair', 1, 'fair.png')
c = 1.345;
huberNoiseModel = gtsam.noiseModel.mEstimator.Huber(c);
plot_m_estimator(x, huberNoiseModel, 'Huber', 2, 'huber.png')
c = 0.1;
cauchyNoiseModel = gtsam.noiseModel.mEstimator.Cauchy(c);
plot_m_estimator(x, cauchyNoiseModel, 'Cauchy', 3, 'cauchy.png')
c = 1.0;
gemanmcclureNoiseModel = gtsam.noiseModel.mEstimator.GemanMcClure(c);
plot_m_estimator(x, gemanmcclureNoiseModel, 'Geman-McClure', 4, 'gemanmcclure.png')
c = 2.9846;
welschNoiseModel = gtsam.noiseModel.mEstimator.Welsch(c);
plot_m_estimator(x, welschNoiseModel, 'Welsch', 5, 'welsch.png')
c = 4.6851;
tukeyNoiseModel = gtsam.noiseModel.mEstimator.Tukey(c);
plot_m_estimator(x, tukeyNoiseModel, 'Tukey', 6, 'tukey.png')
%% Plot rho, psi and weights of the mEstimator.
function plot_m_estimator(x, model, plot_title, fig_id, filename)
w = zeros(size(x));
rho = zeros(size(x));
for i = 1:size(x, 2)
w(i) = model.weight(x(i));
rho(i) = model.residual(x(i));
end
psi = w .* x;
figure(fig_id);
subplot(3, 1, 1);
plot(x, rho, 'LineWidth',2);
title('rho function');
xlim([-5, 5]);
subplot(3, 1, 2);
plot(x, psi, 'LineWidth',2);
title('influence function');
xlim([-5, 5]);
subplot(3, 1, 3);
plot(x, w, 'LineWidth',2);
title('weight function');
xlim([-5, 5]);
sgtitle(plot_title, 'FontSize', 26);
saveas(figure(fig_id), filename);
end

View File

@ -159,10 +159,10 @@ namespace example {
namespace impl {
typedef boost::shared_ptr<NonlinearFactor> shared_nlf;
static SharedDiagonal sigma1_0 = noiseModel::Isotropic::Sigma(2,1.0);
static SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1);
static SharedDiagonal sigma0_2 = noiseModel::Isotropic::Sigma(2,0.2);
static SharedDiagonal constraintModel = noiseModel::Constrained::All(2);
static SharedDiagonal kSigma1_0 = noiseModel::Isotropic::Sigma(2,1.0);
static SharedDiagonal kSigma0_1 = noiseModel::Isotropic::Sigma(2,0.1);
static SharedDiagonal kSigma0_2 = noiseModel::Isotropic::Sigma(2,0.2);
static SharedDiagonal kConstrainedModel = noiseModel::Constrained::All(2);
static const Key _l1_=0, _x1_=1, _x2_=2;
static const Key _x_=0, _y_=1, _z_=2;
@ -170,40 +170,43 @@ static const Key _x_=0, _y_=1, _z_=2;
/* ************************************************************************* */
inline boost::shared_ptr<const NonlinearFactorGraph> sharedNonlinearFactorGraph() {
inline boost::shared_ptr<const NonlinearFactorGraph>
sharedNonlinearFactorGraph(const SharedNoiseModel &noiseModel1 = impl::kSigma0_1,
const SharedNoiseModel &noiseModel2 = impl::kSigma0_2) {
using namespace impl;
using symbol_shorthand::X;
using symbol_shorthand::L;
using symbol_shorthand::X;
// Create
boost::shared_ptr<NonlinearFactorGraph> nlfg(
new NonlinearFactorGraph);
boost::shared_ptr<NonlinearFactorGraph> nlfg(new NonlinearFactorGraph);
// prior on x1
Point2 mu(0,0);
shared_nlf f1(new simulated2D::Prior(mu, sigma0_1, X(1)));
Point2 mu(0, 0);
shared_nlf f1(new simulated2D::Prior(mu, noiseModel1, X(1)));
nlfg->push_back(f1);
// odometry between x1 and x2
Point2 z2(1.5, 0);
shared_nlf f2(new simulated2D::Odometry(z2, sigma0_1, X(1), X(2)));
shared_nlf f2(new simulated2D::Odometry(z2, noiseModel1, X(1), X(2)));
nlfg->push_back(f2);
// measurement between x1 and l1
Point2 z3(0, -1);
shared_nlf f3(new simulated2D::Measurement(z3, sigma0_2, X(1), L(1)));
shared_nlf f3(new simulated2D::Measurement(z3, noiseModel2, X(1), L(1)));
nlfg->push_back(f3);
// measurement between x2 and l1
Point2 z4(-1.5, -1.);
shared_nlf f4(new simulated2D::Measurement(z4, sigma0_2, X(2), L(1)));
shared_nlf f4(new simulated2D::Measurement(z4, noiseModel2, X(2), L(1)));
nlfg->push_back(f4);
return nlfg;
}
/* ************************************************************************* */
inline NonlinearFactorGraph createNonlinearFactorGraph() {
return *sharedNonlinearFactorGraph();
inline NonlinearFactorGraph
createNonlinearFactorGraph(const SharedNoiseModel &noiseModel1 = impl::kSigma0_1,
const SharedNoiseModel &noiseModel2 = impl::kSigma0_2) {
return *sharedNonlinearFactorGraph(noiseModel1, noiseModel2);
}
/* ************************************************************************* */
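With the new parameters, tests can inject alternative noise models without rebuilding the fixture; a hedged fragment (not part of this changeset; the Huber constant is illustrative):

auto robust = noiseModel::Robust::Create(
    noiseModel::mEstimator::Huber::Create(1.345), impl::kSigma0_1);
NonlinearFactorGraph fg = createNonlinearFactorGraph(robust, robust);
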
@ -372,19 +375,19 @@ inline std::pair<NonlinearFactorGraph, Values> createNonlinearSmoother(int T) {
// prior on x1
Point2 x1(1.0, 0.0);
shared_nlf prior(new simulated2D::Prior(x1, sigma1_0, X(1)));
shared_nlf prior(new simulated2D::Prior(x1, kSigma1_0, X(1)));
nlfg.push_back(prior);
poses.insert(X(1), x1);
for (int t = 2; t <= T; t++) {
// odometry between x_t and x_{t-1}
Point2 odo(1.0, 0.0);
shared_nlf odometry(new simulated2D::Odometry(odo, sigma1_0, X(t - 1), X(t)));
shared_nlf odometry(new simulated2D::Odometry(odo, kSigma1_0, X(t - 1), X(t)));
nlfg.push_back(odometry);
// measurement on x_t is like perfect GPS
Point2 xt(t, 0);
shared_nlf measurement(new simulated2D::Prior(xt, sigma1_0, X(t)));
shared_nlf measurement(new simulated2D::Prior(xt, kSigma1_0, X(t)));
nlfg.push_back(measurement);
// initial estimate
@ -412,7 +415,7 @@ inline GaussianFactorGraph createSimpleConstraintGraph() {
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, kSigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -422,7 +425,7 @@ inline GaussianFactorGraph createSimpleConstraintGraph() {
Matrix Ay1 = I_2x2 * -1;
Vector b2 = Vector2(0.0, 0.0);
JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;
@ -453,8 +456,8 @@ inline GaussianFactorGraph createSingleConstraintGraph() {
Vector b1(2);
b1(0) = 1.0;
b1(1) = -1.0;
//GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, sigma0_1->Whiten(Ax), sigma0_1->whiten(b1), sigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, sigma0_1));
//GaussianFactor::shared_ptr f1(new JacobianFactor(_x_, kSigma0_1->Whiten(Ax), kSigma0_1->whiten(b1), kSigma0_1));
JacobianFactor::shared_ptr f1(new JacobianFactor(_x_, Ax, b1, kSigma0_1));
// create binary constraint factor
// between _x_ and _y_, that is going to be the only factor on _y_
@ -468,7 +471,7 @@ inline GaussianFactorGraph createSingleConstraintGraph() {
Matrix Ay1 = I_2x2 * 10;
Vector b2 = Vector2(1.0, 2.0);
JacobianFactor::shared_ptr f2(new JacobianFactor(_x_, Ax1, _y_, Ay1, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;
@ -493,7 +496,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
// unary factor 1
Matrix A = I_2x2;
Vector b = Vector2(-2.0, 2.0);
JacobianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, sigma0_1));
JacobianFactor::shared_ptr lf1(new JacobianFactor(_x_, A, b, kSigma0_1));
// constraint 1
Matrix A11(2, 2);
@ -512,7 +515,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
b1(0) = 1.0;
b1(1) = 2.0;
JacobianFactor::shared_ptr lc1(new JacobianFactor(_x_, A11, _y_, A12, b1,
constraintModel));
kConstrainedModel));
// constraint 2
Matrix A21(2, 2);
@ -531,7 +534,7 @@ inline GaussianFactorGraph createMultiConstraintGraph() {
b2(0) = 3.0;
b2(1) = 4.0;
JacobianFactor::shared_ptr lc2(new JacobianFactor(_x_, A21, _z_, A22, b2,
constraintModel));
kConstrainedModel));
// construct the graph
GaussianFactorGraph fg;

View File

@ -206,7 +206,7 @@ TEST(GaussianFactorGraph, optimize_Cholesky) {
GaussianFactorGraph fg = createGaussianFactorGraph();
// optimize the graph
VectorValues actual = fg.optimize(boost::none, EliminateCholesky);
VectorValues actual = fg.optimize(EliminateCholesky);
// verify
VectorValues expected = createCorrectDelta();
@ -220,7 +220,7 @@ TEST( GaussianFactorGraph, optimize_QR )
GaussianFactorGraph fg = createGaussianFactorGraph();
// optimize the graph
VectorValues actual = fg.optimize(boost::none, EliminateQR);
VectorValues actual = fg.optimize(EliminateQR);
// verify
VectorValues expected = createCorrectDelta();

View File

@ -87,6 +87,12 @@ TEST(Marginals, planarSLAMmarginals) {
soln.insert(x3, Pose2(4.0, 0.0, 0.0));
soln.insert(l1, Point2(2.0, 2.0));
soln.insert(l2, Point2(4.0, 2.0));
VectorValues soln_lin;
soln_lin.insert(x1, Vector3(0.0, 0.0, 0.0));
soln_lin.insert(x2, Vector3(2.0, 0.0, 0.0));
soln_lin.insert(x3, Vector3(4.0, 0.0, 0.0));
soln_lin.insert(l1, Vector2(2.0, 2.0));
soln_lin.insert(l2, Vector2(4.0, 2.0));
Matrix expectedx1(3,3);
expectedx1 <<
@ -110,71 +116,80 @@ TEST(Marginals, planarSLAMmarginals) {
Matrix expectedl2(2,2);
expectedl2 <<
0.293870968, -0.104516129,
-0.104516129, 0.391935484;
-0.104516129, 0.391935484;
// Check marginals covariances for all variables (Cholesky mode)
Marginals marginals(graph, soln, Marginals::CHOLESKY);
EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8));
EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8));
EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8));
EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8));
EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8));
auto testMarginals = [&] (Marginals marginals) {
EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8));
EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8));
EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8));
EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8));
EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8));
};
// Check marginals covariances for all variables (QR mode)
auto testJointMarginals = [&] (Marginals marginals) {
// Check joint marginals for 3 variables
Matrix expected_l2x1x3(8,8);
expected_l2x1x3 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, 0.151935669757191, -0.104516127560770, -0.050967744878460,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.090000180000270, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.000000000000000, 0.040000000000000, 0.010000000000000,
0.151935669757191, 0.007741936219615, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.160967924878730, 0.007741936219615, 0.004516127560770,
-0.104516127560770, 0.351935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
-0.050967744878460, 0.056129031890193, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.004516127560770, 0.056129031890193, 0.027741936219615;
KeyVector variables {x1, l2, x3};
JointMarginal joint_l2x1x3 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,0,2,2)), Matrix(joint_l2x1x3(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,0,3,2)), Matrix(joint_l2x1x3(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,0,3,2)), Matrix(joint_l2x1x3(x3,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,2,2,3)), Matrix(joint_l2x1x3(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,2,3,3)), Matrix(joint_l2x1x3(x1,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,2,3,3)), Matrix(joint_l2x1x3(x3,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,5,2,3)), Matrix(joint_l2x1x3(l2,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,5,3,3)), Matrix(joint_l2x1x3(x1,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,5,3,3)), Matrix(joint_l2x1x3(x3,x3)), 1e-6));
// Check joint marginals for 2 variables (different code path than >2 variable case above)
Matrix expected_l2x1(5,5);
expected_l2x1 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000;
variables.resize(2);
variables[0] = l2;
variables[1] = x1;
JointMarginal joint_l2x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,0,2,2)), Matrix(joint_l2x1(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,0,3,2)), Matrix(joint_l2x1(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,2,2,3)), Matrix(joint_l2x1(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,2,3,3)), Matrix(joint_l2x1(x1,x1)), 1e-6));
// Check joint marginal for 1 variable (different code path than >1 variable cases above)
variables.resize(1);
variables[0] = x1;
JointMarginal joint_x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(expectedx1, Matrix(joint_x1(x1,x1)), 1e-6));
};
Marginals marginals;
marginals = Marginals(graph, soln, Marginals::CHOLESKY);
testMarginals(marginals);
marginals = Marginals(graph, soln, Marginals::QR);
EXPECT(assert_equal(expectedx1, marginals.marginalCovariance(x1), 1e-8));
EXPECT(assert_equal(expectedx2, marginals.marginalCovariance(x2), 1e-8));
EXPECT(assert_equal(expectedx3, marginals.marginalCovariance(x3), 1e-8));
EXPECT(assert_equal(expectedl1, marginals.marginalCovariance(l1), 1e-8));
EXPECT(assert_equal(expectedl2, marginals.marginalCovariance(l2), 1e-8));
testMarginals(marginals);
testJointMarginals(marginals);
// Check joint marginals for 3 variables
Matrix expected_l2x1x3(8,8);
expected_l2x1x3 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000, 0.151935669757191, -0.104516127560770, -0.050967744878460,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.090000180000270, 0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.000000000000000, 0.040000000000000, 0.010000000000000,
0.151935669757191, 0.007741936219615, 0.090000180000270, -0.000000000000000, 0.000000000000000, 0.160967924878730, 0.007741936219615, 0.004516127560770,
-0.104516127560770, 0.351935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000, 0.007741936219615, 0.351935664055174, 0.056129031890193,
-0.050967744878460, 0.056129031890193, 0.000000000000000, 0.000000000000000, 0.010000000000000, 0.004516127560770, 0.056129031890193, 0.027741936219615;
KeyVector variables {x1, l2, x3};
JointMarginal joint_l2x1x3 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,0,2,2)), Matrix(joint_l2x1x3(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,0,3,2)), Matrix(joint_l2x1x3(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,0,3,2)), Matrix(joint_l2x1x3(x3,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,2,2,3)), Matrix(joint_l2x1x3(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,2,3,3)), Matrix(joint_l2x1x3(x1,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,2,3,3)), Matrix(joint_l2x1x3(x3,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(0,5,2,3)), Matrix(joint_l2x1x3(l2,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(2,5,3,3)), Matrix(joint_l2x1x3(x1,x3)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1x3.block(5,5,3,3)), Matrix(joint_l2x1x3(x3,x3)), 1e-6));
// Check joint marginals for 2 variables (different code path than >2 variable case above)
Matrix expected_l2x1(5,5);
expected_l2x1 <<
0.293871159514111, -0.104516127560770, 0.090000180000270, -0.000000000000000, -0.020000000000000,
-0.104516127560770, 0.391935664055174, 0.000000000000000, 0.090000180000270, 0.040000000000000,
0.090000180000270, 0.000000000000000, 0.090000180000270, -0.000000000000000, 0.000000000000000,
-0.000000000000000, 0.090000180000270, -0.000000000000000, 0.090000180000270, 0.000000000000000,
-0.020000000000000, 0.040000000000000, 0.000000000000000, 0.000000000000000, 0.010000000000000;
variables.resize(2);
variables[0] = l2;
variables[1] = x1;
JointMarginal joint_l2x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,0,2,2)), Matrix(joint_l2x1(l2,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,0,3,2)), Matrix(joint_l2x1(x1,l2)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(0,2,2,3)), Matrix(joint_l2x1(l2,x1)), 1e-6));
EXPECT(assert_equal(Matrix(expected_l2x1.block(2,2,3,3)), Matrix(joint_l2x1(x1,x1)), 1e-6));
// Check joint marginal for 1 variable (different code path than >1 variable cases above)
variables.resize(1);
variables[0] = x1;
JointMarginal joint_x1 = marginals.jointMarginalCovariance(variables);
EXPECT(assert_equal(expectedx1, Matrix(joint_x1(x1,x1)), 1e-6));
GaussianFactorGraph gfg = *graph.linearize(soln);
marginals = Marginals(gfg, soln_lin, Marginals::CHOLESKY);
testMarginals(marginals);
marginals = Marginals(gfg, soln_lin, Marginals::QR);
testMarginals(marginals);
testJointMarginals(marginals);
}
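The rewritten test folds the repeated covariance checks into the testMarginals/testJointMarginals lambdas and runs them over every construction path, including the constructors that accept an already-linearized GaussianFactorGraph. A condensed sketch of that linear path as it reads inside the test body, with graph, soln, soln_lin, and x1 as defined above:

// Linearize once at the solution, then build Marginals from the Gaussian
// graph; soln_lin supplies the linearization point as VectorValues.
GaussianFactorGraph gfg = *graph.linearize(soln);
Marginals viaCholesky(gfg, soln_lin, Marginals::CHOLESKY);
Marginals viaQR(gfg, soln_lin, Marginals::QR);
Matrix cov_x1 = viaCholesky.marginalCovariance(x1);  // matches the nonlinear path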
/* ************************************************************************* */
@ -222,17 +237,26 @@ TEST(Marginals, order) {
vals.at<Pose2>(3).bearing(vals.at<Point2>(101)),
vals.at<Pose2>(3).range(vals.at<Point2>(101)), noiseModel::Unit::Create(2));
auto testMarginals = [&] (Marginals marginals, KeySet set) {
KeyVector keys(set.begin(), set.end());
JointMarginal joint = marginals.jointMarginalCovariance(keys);
LONGS_EQUAL(3, (long)joint(0,0).rows());
LONGS_EQUAL(3, (long)joint(1,1).rows());
LONGS_EQUAL(3, (long)joint(2,2).rows());
LONGS_EQUAL(3, (long)joint(3,3).rows());
LONGS_EQUAL(2, (long)joint(100,100).rows());
LONGS_EQUAL(2, (long)joint(101,101).rows());
};
Marginals marginals(fg, vals);
KeySet set = fg.keys();
KeyVector keys(set.begin(), set.end());
JointMarginal joint = marginals.jointMarginalCovariance(keys);
testMarginals(marginals, set);
LONGS_EQUAL(3, (long)joint(0,0).rows());
LONGS_EQUAL(3, (long)joint(1,1).rows());
LONGS_EQUAL(3, (long)joint(2,2).rows());
LONGS_EQUAL(3, (long)joint(3,3).rows());
LONGS_EQUAL(2, (long)joint(100,100).rows());
LONGS_EQUAL(2, (long)joint(101,101).rows());
GaussianFactorGraph gfg = *fg.linearize(vals);
marginals = Marginals(gfg, vals);
set = gfg.keys();
testMarginals(marginals, set);
}
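Here the same checks run against both the nonlinear graph and its linearization. For reference, JointMarginal's operator() returns the covariance block for a key pair, so the row counts asserted above are tangent dimensions (3 for Pose2, 2 for Point2). A sketch, assuming marginals and keys from the test:

JointMarginal joint = marginals.jointMarginalCovariance(keys);
Matrix pose_block  = joint(3, 3);    // 3x3 marginal block of Pose2 key 3
Matrix cross_block = joint(3, 101);  // 3x2 Pose2/Point2 cross-covariance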
/* ************************************************************************* */

View File

@ -198,7 +198,7 @@ TEST(NonlinearFactorGraph, UpdateCholesky) {
}
}
};
EXPECT(assert_equal(initial, fg.updateCholesky(initial, boost::none, dampen), 1e-6));
EXPECT(assert_equal(initial, fg.updateCholesky(initial, dampen), 1e-6));
}
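The boost::none removal also reaches NonlinearFactorGraph::updateCholesky: the dampen callback moves up into the second slot. Sketch of the new call, assuming initial and the dampen lambda defined earlier in this test:

// New signature: no Ordering parameter; dampen (which may adjust each
// HessianFactor before elimination) is now the optional second argument.
Values updated = fg.updateCholesky(initial, dampen);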
/* ************************************************************************* */

View File

@ -40,7 +40,7 @@ double timeKalmanSmoother(int T) {
/* ************************************************************************* */
// Create a planar factor graph and optimize
double timePlanarSmoother(int N, bool old = true) {
GaussianFactorGraph fg = planarGraph(N).get<0>();
GaussianFactorGraph fg = planarGraph(N).first;
clock_t start = clock();
fg.optimize();
clock_t end = clock ();
@ -51,7 +51,7 @@ double timePlanarSmoother(int N, bool old = true) {
/* ************************************************************************* */
// Create a planar factor graph and eliminate
double timePlanarSmootherEliminate(int N, bool old = true) {
GaussianFactorGraph fg = planarGraph(N).get<0>();
GaussianFactorGraph fg = planarGraph(N).first;
clock_t start = clock();
fg.eliminateMultifrontal();
clock_t end = clock ();
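planarGraph(N) evidently now returns a std::pair rather than a boost::tuple, hence .first in place of .get<0>(). A sketch of the accessor change, assuming the pair carries the graph and its ground-truth solution:

// Old: GaussianFactorGraph fg = planarGraph(N).get<0>();  // boost::tuple
const auto result = planarGraph(N);
GaussianFactorGraph fg = result.first;   // the factor graph
VectorValues xtrue = result.second;      // assumed: ground-truth solution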

View File

@ -1,14 +1,9 @@
# Build/install Wrap
set(WRAP_BOOST_LIBRARIES
optimized
${Boost_FILESYSTEM_LIBRARY_RELEASE}
${Boost_SYSTEM_LIBRARY_RELEASE}
${Boost_THREAD_LIBRARY_RELEASE}
debug
${Boost_FILESYSTEM_LIBRARY_DEBUG}
${Boost_SYSTEM_LIBRARY_DEBUG}
${Boost_THREAD_LIBRARY_DEBUG}
Boost::system
Boost::filesystem
Boost::thread
)
# Allow for disabling serialization to handle errors related to Clang's linker
@ -30,8 +25,6 @@ endif()
gtsam_apply_build_flags(wrap_lib)
target_link_libraries(wrap_lib ${WRAP_BOOST_LIBRARIES})
target_include_directories(wrap_lib PUBLIC ${Boost_INCLUDE_DIR})
gtsam_assign_source_folders(${wrap_srcs} ${wrap_headers})
add_executable(wrap wrap.cpp)
target_link_libraries(wrap PRIVATE wrap_lib)
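As in the top-level CMakeLists, the wrap module moves from per-configuration library variables to Boost imported targets, whose include directories and transitive dependencies propagate as usage requirements. A minimal sketch of the pattern with a hypothetical consumer target:

# One call replaces the optimized/debug variable pairs and the explicit
# target_include_directories on Boost_INCLUDE_DIR.
find_package(Boost 1.43 REQUIRED COMPONENTS filesystem system thread)
add_executable(demo demo.cpp)   # hypothetical target
target_link_libraries(demo Boost::filesystem Boost::system Boost::thread)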