Merge remote-tracking branch 'origin/develop' into feature/improvementsIncrementalFilter

release/4.3a0
Frank Dellaert 2019-05-17 22:19:27 -04:00
commit 67b65f9845
483 changed files with 8369 additions and 3971 deletions

.gitignore

@@ -1,4 +1,5 @@
 /build*
+/debug*
 .idea
 *.pyc
 *.DS_Store


@@ -84,6 +84,12 @@ set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "The version of python to build
 if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND NOT GTSAM_BUILD_WRAP)
 message(FATAL_ERROR "GTSAM_INSTALL_MATLAB_TOOLBOX or GTSAM_INSTALL_CYTHON_TOOLBOX is enabled, please also enable GTSAM_BUILD_WRAP")
 endif()
+if((GTSAM_INSTALL_MATLAB_TOOLBOX OR GTSAM_INSTALL_CYTHON_TOOLBOX) AND GTSAM_BUILD_TYPE_POSTFIXES)
+set(CURRENT_POSTFIX ${CMAKE_${CMAKE_BUILD_TYPE}_POSTFIX})
+if(NOT "${CURRENT_POSTFIX}" STREQUAL "")
+message(FATAL_ERROR "Cannot use executable postfixes with the matlab or cython wrappers. Please disable GTSAM_BUILD_TYPE_POSTFIXES")
+endif()
+endif()
 if(GTSAM_INSTALL_WRAP AND NOT GTSAM_BUILD_WRAP)
 message(FATAL_ERROR "GTSAM_INSTALL_WRAP is enabled, please also enable GTSAM_BUILD_WRAP")
 endif()
@@ -130,6 +136,14 @@ if(MSVC)
 endif()
 endif()
+# If building DLLs in MSVC, we need to avoid EIGEN_STATIC_ASSERT()
+# or explicit instantiation will generate build errors.
+# See: https://bitbucket.org/gtborg/gtsam/issues/417/fail-to-build-on-msvc-2017
+#
+if(MSVC AND BUILD_SHARED_LIBS)
+list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC EIGEN_NO_STATIC_ASSERT)
+endif()
 # Store these in variables so they are automatically replicated in GTSAMConfig.cmake and such.
 set(BOOST_FIND_MINIMUM_VERSION 1.43)
 set(BOOST_FIND_MINIMUM_COMPONENTS serialization system filesystem thread program_options date_time timer chrono regex)
@@ -142,22 +156,43 @@ if(NOT Boost_SERIALIZATION_LIBRARY OR NOT Boost_SYSTEM_LIBRARY OR NOT Boost_FILE
 message(FATAL_ERROR "Missing required Boost components >= v1.43, please install/upgrade Boost or configure your search paths.")
 endif()
+option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
 # Allow for not using the timer libraries on boost < 1.48 (GTSAM timing code falls back to old timer library)
-option(GTSAM_DISABLE_NEW_TIMERS "Disables using Boost.chrono for timing" OFF)
+# JLBC: This was once updated to target-based names (Boost::xxx), but it caused
+# problems with Boost versions newer than FindBoost.cmake was prepared to handle,
+# so we downgraded this to classic filenames-based variables, and manually adding
+# the target_include_directories(xxx ${Boost_INCLUDE_DIR})
 set(GTSAM_BOOST_LIBRARIES
-Boost::serialization
-Boost::system
-Boost::filesystem
-Boost::thread
-Boost::date_time
-Boost::regex
+optimized
+${Boost_SERIALIZATION_LIBRARY_RELEASE}
+${Boost_SYSTEM_LIBRARY_RELEASE}
+${Boost_FILESYSTEM_LIBRARY_RELEASE}
+${Boost_THREAD_LIBRARY_RELEASE}
+${Boost_DATE_TIME_LIBRARY_RELEASE}
+${Boost_REGEX_LIBRARY_RELEASE}
+debug
+${Boost_SERIALIZATION_LIBRARY_DEBUG}
+${Boost_SYSTEM_LIBRARY_DEBUG}
+${Boost_FILESYSTEM_LIBRARY_DEBUG}
+${Boost_THREAD_LIBRARY_DEBUG}
+${Boost_DATE_TIME_LIBRARY_DEBUG}
+${Boost_REGEX_LIBRARY_DEBUG}
 )
+message(STATUS "GTSAM_BOOST_LIBRARIES: ${GTSAM_BOOST_LIBRARIES}")
 if (GTSAM_DISABLE_NEW_TIMERS)
 message("WARNING: GTSAM timing instrumentation manually disabled")
 list(APPEND GTSAM_COMPILE_DEFINITIONS_PUBLIC DGTSAM_DISABLE_NEW_TIMERS)
 else()
 if(Boost_TIMER_LIBRARY)
-list(APPEND GTSAM_BOOST_LIBRARIES Boost::timer Boost::chrono)
+list(APPEND GTSAM_BOOST_LIBRARIES
+optimized
+${Boost_TIMER_LIBRARY_RELEASE}
+${Boost_CHRONO_LIBRARY_RELEASE}
+debug
+${Boost_TIMER_LIBRARY_DEBUG}
+${Boost_CHRONO_LIBRARY_DEBUG}
+)
 else()
 list(APPEND GTSAM_BOOST_LIBRARIES rt) # When using the header-only boost timer library, need -lrt
 message("WARNING: GTSAM timing instrumentation will use the older, less accurate, Boost timer library because boost older than 1.48 was found.")


@@ -6,7 +6,7 @@ file(GLOB cppunitlite_src "*.cpp")
 add_library(CppUnitLite STATIC ${cppunitlite_src} ${cppunitlite_headers})
 list(APPEND GTSAM_EXPORTED_TARGETS CppUnitLite)
 set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
-target_link_libraries(CppUnitLite PUBLIC Boost::boost) # boost/lexical_cast.h
+target_include_directories(CppUnitLite PUBLIC ${Boost_INCLUDE_DIR}) # boost/lexical_cast.h
 gtsam_assign_source_folders("${cppunitlite_headers};${cppunitlite_src}") # MSVC project structure


@@ -166,3 +166,30 @@ Another useful debugging symbol is _GLIBCXX_DEBUG, which enables debug checks an
NOTE: The native Snow Leopard g++ compiler/library contains a bug that makes it impossible to use _GLIBCXX_DEBUG. MacPorts g++ compilers do work with it though.
NOTE: If _GLIBCXX_DEBUG is used to compile gtsam, anything that links against gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of header-only Eigen.
## Installing MKL on Linux
Intel has a guide for installing MKL on Linux through APT repositories at <https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo>.
After following the instructions, add the following to your `~/.bashrc` (and afterwards, open a new terminal before compiling GTSAM):
`LD_PRELOAD` need only be set if you are building the cython wrapper to use GTSAM from python.
```sh
source /opt/intel/mkl/bin/mklvars.sh intel64
export LD_PRELOAD="$LD_PRELOAD:/opt/intel/mkl/lib/intel64/libmkl_core.so:/opt/intel/mkl/lib/intel64/libmkl_sequential.so"
```
To use MKL in GTSAM pass the flag `-DGTSAM_WITH_EIGEN_MKL=ON` to cmake.
The `LD_PRELOAD` fix seems to be related to a well known problem with MKL which leads to lots of undefined symbol errors, for example:
- <https://software.intel.com/en-us/forums/intel-math-kernel-library/topic/300857>
- <https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628976>
- <https://groups.google.com/a/continuum.io/forum/#!topic/anaconda/J3YGoef64z8>
Failing to specify `LD_PRELOAD` may lead to errors such as:
`ImportError: /opt/intel/mkl/lib/intel64/libmkl_vml_avx2.so: undefined symbol: mkl_serv_getenv`
or
`Intel MKL FATAL ERROR: Cannot load libmkl_avx2.so or libmkl_def.so.`
when importing GTSAM using the cython wrapper in python.
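For reference, configuring GTSAM itself with MKL then amounts to passing that flag when running cmake. A minimal sketch (the build directory and job count are assumptions, not part of the original instructions):
```sh
mkdir build && cd build
cmake -DGTSAM_WITH_EIGEN_MKL=ON ..
make -j4
```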


@@ -36,7 +36,9 @@ Prerequisites:
 Optional prerequisites - used automatically if findable by CMake:
 - [Intel Threaded Building Blocks (TBB)](http://www.threadingbuildingblocks.org/) (Ubuntu: `sudo apt-get install libtbb-dev`)
-- [Intel Math Kernel Library (MKL)](http://software.intel.com/en-us/intel-mkl)
+- [Intel Math Kernel Library (MKL)](http://software.intel.com/en-us/intel-mkl) (Ubuntu: [installing using APT](https://software.intel.com/en-us/articles/installing-intel-free-libs-and-python-apt-repo))
+    - See [INSTALL.md](INSTALL.md) for more installation information
+    - Note that MKL may not provide a speedup in all cases. Make sure to benchmark your problem with and without MKL.
 GTSAM 4 Compatibility
 ---------------------


@@ -30,9 +30,12 @@ find_package(GTSAM REQUIRED) # Uses installed package
 ###################################################################################
 # Build static library from common sources
 set(CONVENIENCE_LIB_NAME ${PROJECT_NAME})
-add_library(${CONVENIENCE_LIB_NAME} STATIC example/PrintExamples.h example/PrintExamples.cpp)
+add_library(${CONVENIENCE_LIB_NAME} SHARED example/PrintExamples.h example/PrintExamples.cpp)
 target_link_libraries(${CONVENIENCE_LIB_NAME} gtsam)
+# Install library
+install(TARGETS ${CONVENIENCE_LIB_NAME} LIBRARY DESTINATION lib ARCHIVE DESTINATION lib RUNTIME DESTINATION bin)
 ###################################################################################
 # Build tests (CMake tracks the dependecy to link with GTSAM through our project's static library)
 gtsamAddTestsGlob("example" "tests/test*.cpp" "" "${CONVENIENCE_LIB_NAME}")


@@ -0,0 +1,32 @@
# MATLAB Wrapper Example Project
This project serves as a lightweight example for demonstrating how to wrap C++ code in MATLAB using GTSAM.
## Compiling
We follow the regular build procedure inside the `example_project` directory:
```sh
mkdir build && cd build
cmake ..
make -j8
sudo make install
sudo ldconfig # ensures the shared object file generated is correctly loaded
```
## Usage
Now you can open MATLAB and add the `gtsam_toolbox` to the MATLAB path
```matlab
addpath('/usr/local/gtsam_toolbox')
```
At this point you are ready to run the example project. Starting from the `example_project` directory inside MATLAB, simply run code like regular MATLAB, e.g.
```matlab
pe = example.PrintExamples();
pe.sayHello();
pe.sayGoodbye();
```


@@ -18,9 +18,12 @@
 // This is an interface file for automatic MATLAB wrapper generation. See
 // gtsam.h for full documentation and more examples.
+#include <example/PrintExamples.h>
 namespace example {
 class PrintExamples {
+PrintExamples();
 void sayHello() const;
 void sayGoodbye() const;
 };


@@ -17,6 +17,7 @@
 #pragma once
+#include <iostream>
 #include <string>
 namespace example {


@@ -62,7 +62,7 @@ Examples: noiseModel_Gaussian, noiseModel_mEstimator_Tukey
 - Casting from a base class to a derive class must be done explicitly.
 Examples:
 ```Python
-noiseBase = factor.get_noiseModel()
+noiseBase = factor.noiseModel()
 noiseGaussian = dynamic_cast_noiseModel_Gaussian_noiseModel_Base(noiseBase)
 ```


@@ -0,0 +1,118 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Example comparing DoglegOptimizer with Levenberg-Marquardt.
Author: Frank Dellaert
"""
# pylint: disable=no-member, invalid-name
import math
import argparse
import gtsam
import matplotlib.pyplot as plt
import numpy as np
def run(args):
"""Test Dogleg vs LM, inspired by issue #452."""
# print parameters
print("num samples = {}, deltaInitial = {}".format(
args.num_samples, args.delta))
# Ground truth solution
T11 = gtsam.Pose2(0, 0, 0)
T12 = gtsam.Pose2(1, 0, 0)
T21 = gtsam.Pose2(0, 1, 0)
T22 = gtsam.Pose2(1, 1, 0)
# Factor graph
graph = gtsam.NonlinearFactorGraph()
# Priors
prior = gtsam.noiseModel_Isotropic.Sigma(3, 1)
graph.add(gtsam.PriorFactorPose2(11, T11, prior))
graph.add(gtsam.PriorFactorPose2(21, T21, prior))
# Odometry
model = gtsam.noiseModel_Diagonal.Sigmas(np.array([0.01, 0.01, 0.3]))
graph.add(gtsam.BetweenFactorPose2(11, 12, T11.between(T12), model))
graph.add(gtsam.BetweenFactorPose2(21, 22, T21.between(T22), model))
# Range
model_rho = gtsam.noiseModel_Isotropic.Sigma(1, 0.01)
graph.add(gtsam.RangeFactorPose2(12, 22, 1.0, model_rho))
params = gtsam.DoglegParams()
params.setDeltaInitial(args.delta) # default is 10
# Add progressively more noise to ground truth
sigmas = [0.01, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20]
n = len(sigmas)
p_dl, s_dl, p_lm, s_lm = [0]*n, [0]*n, [0]*n, [0]*n
for i, sigma in enumerate(sigmas):
dl_fails, lm_fails = 0, 0
# Attempt num_samples optimizations for both DL and LM
for _attempt in range(args.num_samples):
initial = gtsam.Values()
initial.insert(11, T11.retract(np.random.normal(0, sigma, 3)))
initial.insert(12, T12.retract(np.random.normal(0, sigma, 3)))
initial.insert(21, T21.retract(np.random.normal(0, sigma, 3)))
initial.insert(22, T22.retract(np.random.normal(0, sigma, 3)))
# Run dogleg optimizer
dl = gtsam.DoglegOptimizer(graph, initial, params)
result = dl.optimize()
dl_fails += graph.error(result) > 1e-9
# Run
lm = gtsam.LevenbergMarquardtOptimizer(graph, initial)
result = lm.optimize()
lm_fails += graph.error(result) > 1e-9
# Calculate Bayes estimate of success probability
# using a beta prior of alpha=0.5, beta=0.5
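# (Clarifying note, not in the original file: with a Beta(alpha, beta) prior, the
# posterior after num_samples trials is Beta(alpha + successes, beta + failures);
# p_dl/p_lm below are its posterior mean and stddev() its standard deviation.)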
alpha, beta = 0.5, 0.5
v = args.num_samples+alpha+beta
p_dl[i] = (args.num_samples-dl_fails+alpha)/v
p_lm[i] = (args.num_samples-lm_fails+alpha)/v
def stddev(p):
"""Calculate standard deviation."""
return math.sqrt(p*(1-p)/(1+v))
s_dl[i] = stddev(p_dl[i])
s_lm[i] = stddev(p_lm[i])
fmt = "sigma= {}:\tDL success {:.2f}% +/- {:.2f}%, LM success {:.2f}% +/- {:.2f}%"
print(fmt.format(sigma,
100*p_dl[i], 100*s_dl[i],
100*p_lm[i], 100*s_lm[i]))
if args.plot:
fig, ax = plt.subplots()
dl_plot = plt.errorbar(sigmas, p_dl, yerr=s_dl, label="Dogleg")
lm_plot = plt.errorbar(sigmas, p_lm, yerr=s_lm, label="LM")
plt.title("Dogleg emprical success vs. LM")
plt.legend(handles=[dl_plot, lm_plot])
ax.set_xlim(0, sigmas[-1]+1)
ax.set_ylim(0, 1)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Compare Dogleg and LM success rates")
parser.add_argument("-n", "--num_samples", type=int, default=1000,
help="Number of samples for each sigma")
parser.add_argument("-d", "--delta", type=float, default=10.0,
help="Initial delta for dogleg")
parser.add_argument("-p", "--plot", action="store_true",
help="Flag to plot results")
args = parser.parse_args()
run(args)


@@ -115,7 +115,7 @@ class ThreeLinkArm(object):
 l1Zl1 = expmap(0.0, 0.0, q[0])
 l2Zl2 = expmap(0.0, self.L1, q[1])
-l3Zl3 = expmap(0.0, 7.0, q[2])
+l3Zl3 = expmap(0.0, self.L1+self.L2, q[2])
 return compose(l1Zl1, l2Zl2, l3Zl3, self.sXt0)
 def ik(self, sTt_desired, e=1e-9):
@@ -297,12 +297,18 @@ def run_example():
 """ Use trajectory interpolation and then trajectory tracking a la Murray
 to move a 3-link arm on a straight line.
 """
+# Create arm
 arm = ThreeLinkArm()
+# Get initial pose using forward kinematics
 q = np.radians(vector3(30, -30, 45))
 sTt_initial = arm.fk(q)
+# Create interpolated trajectory in task space to desired goal pose
 sTt_goal = Pose2(2.4, 4.3, math.radians(0))
 poses = trajectory(sTt_initial, sTt_goal, 50)
+# Setup figure and plot initial pose
 fignum = 0
 fig = plt.figure(fignum)
 axes = fig.gca()
@@ -310,6 +316,8 @@ def run_example():
 axes.set_ylim(0, 10)
 gtsam_plot.plot_pose2(fignum, arm.fk(q))
+# For all poses in interpolated trajectory, calculate dq to move to next pose.
+# We do this by calculating the local Jacobian J and doing dq = inv(J)*delta(sTt, pose).
 for pose in poses:
 sTt = arm.fk(q)
 error = delta(sTt, pose)


@@ -30,9 +30,9 @@ class TestPose3(GtsamTestCase):
 self.gtsamAssertEquals(actual, expected, 1e-6)
 def test_transform_to(self):
-"""Test transform_to method."""
+"""Test transformTo method."""
 transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
-actual = transform.transform_to(Point3(3, 2, 10))
+actual = transform.transformTo(Point3(3, 2, 10))
 expected = Point3(2, 1, 10)
 self.gtsamAssertEquals(actual, expected, 1e-6)


@@ -0,0 +1,38 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
See LICENSE for the license information
Unit tests for Disjoint Set Forest.
Author: Frank Dellaert & Varun Agrawal
"""
# pylint: disable=invalid-name, no-name-in-module, no-member
from __future__ import print_function
import unittest
import gtsam
from gtsam.utils.test_case import GtsamTestCase
class TestDSFMap(GtsamTestCase):
"""Tests for DSFMap."""
def test_all(self):
"""Test everything in DFSMap."""
def key(index_pair):
return index_pair.i(), index_pair.j()
dsf = gtsam.DSFMapIndexPair()
pair1 = gtsam.IndexPair(1, 18)
self.assertEqual(key(dsf.find(pair1)), key(pair1))
pair2 = gtsam.IndexPair(2, 2)
dsf.merge(pair1, pair2)
self.assertEqual(key(dsf.find(pair1)), key(dsf.find(pair2)))
if __name__ == '__main__':
unittest.main()

examples/Data/HS21.QPS (new file)

@@ -0,0 +1,20 @@
NAME HS21
ROWS
N OBJ.FUNC
G R------1
COLUMNS
C------1 R------1 0.100000e+02
C------2 R------1 -.100000e+01
RHS
RHS OBJ.FUNC 0.100000e+03
RHS R------1 0.100000e+02
RANGES
BOUNDS
LO BOUNDS C------1 0.200000e+01
UP BOUNDS C------1 0.500000e+02
LO BOUNDS C------2 -.500000e+02
UP BOUNDS C------2 0.500000e+02
QUADOBJ
C------1 C------1 0.200000e-01
C------2 C------2 0.200000e+01
ENDATA

examples/Data/HS268.QPS (new file)

@@ -0,0 +1,55 @@
NAME HS268
ROWS
N OBJ.FUNC
G R------1
G R------2
G R------3
G R------4
G R------5
COLUMNS
C------1 OBJ.FUNC 0.183400e+05 R------1 -.100000e+01
C------1 R------2 0.100000e+02 R------3 -.800000e+01
C------1 R------4 0.800000e+01 R------5 -.400000e+01
C------2 OBJ.FUNC -.341980e+05 R------1 -.100000e+01
C------2 R------2 0.100000e+02 R------3 0.100000e+01
C------2 R------4 -.100000e+01 R------5 -.200000e+01
C------3 OBJ.FUNC 0.454200e+04 R------1 -.100000e+01
C------3 R------2 -.300000e+01 R------3 -.200000e+01
C------3 R------4 0.200000e+01 R------5 0.300000e+01
C------4 OBJ.FUNC 0.867200e+04 R------1 -.100000e+01
C------4 R------2 0.500000e+01 R------3 -.500000e+01
C------4 R------4 0.500000e+01 R------5 -.500000e+01
C------5 OBJ.FUNC 0.860000e+02 R------1 -.100000e+01
C------5 R------2 0.400000e+01 R------3 0.300000e+01
C------5 R------4 -.300000e+01 R------5 0.100000e+01
RHS
RHS OBJ.FUNC -.144630e+05
RHS R------1 -.500000e+01
RHS R------2 0.200000e+02
RHS R------3 -.400000e+02
RHS R------4 0.110000e+02
RHS R------5 -.300000e+02
RANGES
BOUNDS
FR BOUNDS C------1
FR BOUNDS C------2
FR BOUNDS C------3
FR BOUNDS C------4
FR BOUNDS C------5
QUADOBJ
C------1 C------1 0.203940e+05
C------1 C------2 -.249080e+05
C------1 C------3 -.202600e+04
C------1 C------4 0.389600e+04
C------1 C------5 0.658000e+03
C------2 C------2 0.418180e+05
C------2 C------3 -.346600e+04
C------2 C------4 -.982800e+04
C------2 C------5 -.372000e+03
C------3 C------3 0.351000e+04
C------3 C------4 0.217800e+04
C------3 C------5 -.348000e+03
C------4 C------4 0.303000e+04
C------4 C------5 -.440000e+02
C------5 C------5 0.540000e+02
ENDATA

examples/Data/HS35.QPS (new file)

@@ -0,0 +1,20 @@
NAME HS35
ROWS
N OBJ.FUNC
G R------1
COLUMNS
C------1 OBJ.FUNC -.800000e+01 R------1 -.100000e+01
C------2 OBJ.FUNC -.600000e+01 R------1 -.100000e+01
C------3 OBJ.FUNC -.400000e+01 R------1 -.200000e+01
RHS
RHS OBJ.FUNC -.900000e+01
RHS R------1 -.300000e+01
RANGES
BOUNDS
QUADOBJ
C------1 C------1 0.400000e+01
C------1 C------2 0.200000e+01
C------1 C------3 0.200000e+01
C------2 C------2 0.400000e+01
C------3 C------3 0.200000e+01
ENDATA

examples/Data/HS35MOD.QPS (new file)

@@ -0,0 +1,21 @@
NAME HS35MOD
ROWS
N OBJ.FUNC
G R------1
COLUMNS
C------1 OBJ.FUNC -.800000e+01 R------1 -.100000e+01
C------2 OBJ.FUNC -.600000e+01 R------1 -.100000e+01
C------3 OBJ.FUNC -.400000e+01 R------1 -.200000e+01
RHS
RHS OBJ.FUNC -.900000e+01
RHS R------1 -.300000e+01
RANGES
BOUNDS
FX BOUNDS C------2 0.500000e+00
QUADOBJ
C------1 C------1 0.400000e+01
C------1 C------2 0.200000e+01
C------1 C------3 0.200000e+01
C------2 C------2 0.400000e+01
C------3 C------3 0.200000e+01
ENDATA

examples/Data/HS51.QPS (new file)

@@ -0,0 +1,33 @@
NAME HS51
ROWS
N OBJ.FUNC
E R------1
E R------2
E R------3
COLUMNS
C------1 R------1 0.100000e+01
C------2 OBJ.FUNC -.400000e+01 R------1 0.300000e+01
C------2 R------3 0.100000e+01
C------3 OBJ.FUNC -.400000e+01 R------2 0.100000e+01
C------4 OBJ.FUNC -.200000e+01 R------2 0.100000e+01
C------5 OBJ.FUNC -.200000e+01 R------2 -.200000e+01
C------5 R------3 -.100000e+01
RHS
RHS OBJ.FUNC -.600000e+01
RHS R------1 0.400000e+01
RANGES
BOUNDS
FR BOUNDS C------1
FR BOUNDS C------2
FR BOUNDS C------3
FR BOUNDS C------4
FR BOUNDS C------5
QUADOBJ
C------1 C------1 0.200000e+01
C------1 C------2 -.200000e+01
C------2 C------2 0.400000e+01
C------2 C------3 0.200000e+01
C------3 C------3 0.200000e+01
C------4 C------4 0.200000e+01
C------5 C------5 0.200000e+01
ENDATA

examples/Data/HS52.QPS (new file)

@@ -0,0 +1,32 @@
NAME HS52
ROWS
N OBJ.FUNC
E R------1
E R------2
E R------3
COLUMNS
C------1 R------1 0.100000e+01
C------2 OBJ.FUNC -.400000e+01 R------1 0.300000e+01
C------2 R------3 0.100000e+01
C------3 OBJ.FUNC -.400000e+01 R------2 0.100000e+01
C------4 OBJ.FUNC -.200000e+01 R------2 0.100000e+01
C------5 OBJ.FUNC -.200000e+01 R------2 -.200000e+01
C------5 R------3 -.100000e+01
RHS
RHS OBJ.FUNC -.600000e+01
RANGES
BOUNDS
FR BOUNDS C------1
FR BOUNDS C------2
FR BOUNDS C------3
FR BOUNDS C------4
FR BOUNDS C------5
QUADOBJ
C------1 C------1 0.320000e+02
C------1 C------2 -.800000e+01
C------2 C------2 0.400000e+01
C------2 C------3 0.200000e+01
C------3 C------3 0.200000e+01
C------4 C------4 0.200000e+01
C------5 C------5 0.200000e+01
ENDATA

examples/Data/QPTEST.QPS (new file)

@@ -0,0 +1,19 @@
NAME QP example
ROWS
N obj
G r1
L r2
COLUMNS
c1 r1 2.0 r2 -1.0
c1 obj 1.5
c2 r1 1.0 r2 2.0
c2 obj -2.0
RHS
rhs1 r1 2.0 r2 6.0
BOUNDS
UP bnd1 c1 20.0
QUADOBJ
c1 c1 8.0
c1 c2 2.0
c2 c2 10.0
ENDATA


examples/Data/toy3D.xml (new file)

@@ -0,0 +1,169 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE boost_serialization>
<boost_serialization signature="serialization::archive" version="17">
<graph class_id="0" tracking_level="0" version="0">
<Base class_id="1" tracking_level="0" version="0">
<factors_ class_id="2" tracking_level="0" version="0">
<count>2</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>
<count>2</count>
<item_version>0</item_version>
<item>0</item>
<item>1</item>
</keys_>
</Base>
</Base>
<Ab_ class_id="8" tracking_level="0" version="0">
<matrix_ class_id="9" tracking_level="0" version="0">
<rows>9</rows>
<cols>7</cols>
<data>
<item>1.22427709071730959e+01</item>
<item>1.51514613104920048e+01</item>
<item>3.60934366857813060e+00</item>
<item>-1.18407259026383116e+01</item>
<item>7.84826117220921216e+00</item>
<item>1.23509165494819051e+01</item>
<item>-6.09875015991639735e+00</item>
<item>6.16547190708139126e-01</item>
<item>3.94972084922329048e+00</item>
<item>-4.89208482920378174e+00</item>
<item>3.02091647632478866e+00</item>
<item>-8.95328692238917512e+00</item>
<item>7.89831607220345955e+00</item>
<item>-2.36793602009719084e+00</item>
<item>1.48517612051941725e+01</item>
<item>-3.97284286249233731e-01</item>
<item>-1.95744531643153863e+01</item>
<item>-3.85954855417462017e+00</item>
<item>4.79268277145419042e+00</item>
<item>-9.01707953629520453e+00</item>
<item>1.37848069005841385e+01</item>
<item>1.04829326688375950e+01</item>
<item>-5.00630568442241675e+00</item>
<item>4.70463561852773182e+00</item>
<item>-1.59179134598689274e+01</item>
<item>-2.04767784956723942e+00</item>
<item>9.54135497908261954e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>-0.00000000000000000e+00</item>
<item>-1.76201757312015372e+00</item>
<item>1.98634190821282672e+01</item>
<item>1.52966546661624236e+00</item>
<item>1.94817649567575373e+01</item>
<item>1.39684693294110307e+00</item>
<item>4.30228460420588288e+00</item>
<item>1.76201757312015372e+00</item>
<item>-1.98634190821282672e+01</item>
<item>-1.52966546661624236e+00</item>
<item>-0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>4.16606867942304948e+00</item>
<item>1.86906420801308037e+00</item>
<item>-1.94717865319198360e+01</item>
<item>-1.94817649567575373e+01</item>
<item>-1.39684693294110307e+00</item>
<item>-4.30228460420588288e+00</item>
<item>-4.16606867942304948e+00</item>
<item>-1.86906420801308037e+00</item>
<item>1.94717865319198360e+01</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00891462904330531e+01</item>
<item>-1.08132497987816869e+01</item>
<item>8.66487736568128497e+00</item>
<item>2.88370015604634311e+01</item>
<item>1.89391698948574643e+01</item>
<item>2.12398960190661290e+00</item>
<item>1.22150946112124039e+01</item>
<item>-2.33658532501596561e+01</item>
<item>1.51576204760307363e+01</item>
</data>
</matrix_>
<variableColOffsets_>
<count>4</count>
<item_version>0</item_version>
<item>0</item>
<item>3</item>
<item>6</item>
<item>7</item>
</variableColOffsets_>
<rowStart_>0</rowStart_>
<rowEnd_>9</rowEnd_>
<blockStart_>0</blockStart_>
</Ab_>
<model_ class_id="11" tracking_level="0" version="1">
<px class_id="-1"></px>
</model_>
</px>
</item>
<item>
<px class_id_reference="4" object_id="_1">
<Base>
<Base>
<keys_>
<count>2</count>
<item_version>0</item_version>
<item>0</item>
<item>1</item>
</keys_>
</Base>
</Base>
<Ab_>
<matrix_>
<rows>3</rows>
<cols>7</cols>
<data>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>1.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
<item>0.00000000000000000e+00</item>
</data>
</matrix_>
<variableColOffsets_>
<count>4</count>
<item_version>0</item_version>
<item>0</item>
<item>3</item>
<item>6</item>
<item>7</item>
</variableColOffsets_>
<rowStart_>0</rowStart_>
<rowEnd_>3</rowEnd_>
<blockStart_>0</blockStart_>
</Ab_>
<model_>
<px class_id="-1"></px>
</model_>
</px>
</item>
</factors_>
</Base>
</graph>
</boost_serialization>


@@ -0,0 +1,92 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file InverseKinematicsExampleExpressions.cpp
* @brief Implement inverse kinematics on a three-link arm using expressions.
* @date April 15, 2019
* @author Frank Dellaert
*/
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/Marginals.h>
#include <gtsam/nonlinear/expressions.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/expressions.h>
#include <cmath>
using namespace std;
using namespace gtsam;
// Scalar multiplication of a vector, with derivatives.
inline Vector3 scalarMultiply(const double& s, const Vector3& v,
OptionalJacobian<3, 1> Hs,
OptionalJacobian<3, 3> Hv) {
if (Hs) *Hs = v;
if (Hv) *Hv = s * I_3x3;
return s * v;
}
// Expression version of scalar product, using above function.
inline Vector3_ operator*(const Double_& s, const Vector3_& v) {
return Vector3_(&scalarMultiply, s, v);
}
// Expression version of Pose2::Expmap
inline Pose2_ Expmap(const Vector3_& xi) { return Pose2_(&Pose2::Expmap, xi); }
// Main function
int main(int argc, char** argv) {
// Three-link planar manipulator specification.
const double L1 = 3.5, L2 = 3.5, L3 = 2.5; // link lengths
const Pose2 sXt0(0, L1 + L2 + L3, M_PI / 2); // end-effector pose at rest
const Vector3 xi1(0, 0, 1), xi2(L1, 0, 1),
xi3(L1 + L2, 0, 1); // screw axes at rest
// Create Expressions for unknowns
using symbol_shorthand::Q;
Double_ q1(Q(1)), q2(Q(2)), q3(Q(3));
// Forward kinematics expression as product of exponentials
Pose2_ l1Zl1 = Expmap(q1 * Vector3_(xi1));
Pose2_ l2Zl2 = Expmap(q2 * Vector3_(xi2));
Pose2_ l3Zl3 = Expmap(q3 * Vector3_(xi3));
Pose2_ forward = compose(compose(l1Zl1, l2Zl2), compose(l3Zl3, Pose2_(sXt0)));
// Create a factor graph with a single expression factor.
ExpressionFactorGraph graph;
Pose2 desiredEndEffectorPose(3, 2, 0);
auto model = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
graph.addExpressionFactor(forward, desiredEndEffectorPose, model);
// Create initial estimate
Values initial;
initial.insert(Q(1), 0.1);
initial.insert(Q(2), 0.2);
initial.insert(Q(3), 0.3);
initial.print("\nInitial Estimate:\n"); // print
GTSAM_PRINT(forward.value(initial));
// Optimize the initial values using a Levenberg-Marquardt nonlinear optimizer
LevenbergMarquardtParams params;
params.setlambdaInitial(1e6);
LevenbergMarquardtOptimizer optimizer(graph, initial, params);
Values result = optimizer.optimize();
result.print("Final Result:\n");
GTSAM_PRINT(forward.value(result));
return 0;
}


@@ -79,7 +79,7 @@ int main(const int argc, const char *argv[]) {
 key2 = pose3Between->key2() - firstKey;
 }
 NonlinearFactor::shared_ptr simpleFactor(
-new BetweenFactor<Pose3>(key1, key2, pose3Between->measured(), pose3Between->get_noiseModel()));
+new BetweenFactor<Pose3>(key1, key2, pose3Between->measured(), pose3Between->noiseModel()));
 simpleGraph.add(simpleFactor);
 }
 }


@@ -72,7 +72,7 @@ int main(int argc, char* argv[]) {
 Point2 measurement = camera.project(points[j]);
 // Below an expression for the prediction of the measurement:
 Point3_ p('l', j);
-Point2_ prediction = uncalibrate(cK, project(transform_to(x, p)));
+Point2_ prediction = uncalibrate(cK, project(transformTo(x, p)));
 // Again, here we use an ExpressionFactor
 graph.addExpressionFactor(prediction, measurement, measurementNoise);
 }


@@ -362,7 +362,7 @@ void runIncremental()
 Rot2 measuredBearing = measured.bearing();
 double measuredRange = measured.range();
 newVariables.insert(lmKey,
-pose.transform_from(measuredBearing.rotate(Point2(measuredRange, 0.0))));
+pose.transformFrom(measuredBearing.rotate(Point2(measuredRange, 0.0))));
 }
 }
 else


@@ -89,8 +89,8 @@ int main(int argc, char** argv){
 //if the landmark variable included in this factor has not yet been added to the initial variable value estimate, add it
 if (!initial_estimate.exists(Symbol('l', l))) {
 Pose3 camPose = initial_estimate.at<Pose3>(Symbol('x', x));
-//transform_from() transforms the input Point3 from the camera pose space, camPose, to the global space
-Point3 worldPoint = camPose.transform_from(Point3(X, Y, Z));
+//transformFrom() transforms the input Point3 from the camera pose space, camPose, to the global space
+Point3 worldPoint = camPose.transformFrom(Point3(X, Y, Z));
 initial_estimate.insert(Symbol('l', l), worldPoint);
 }
 }


@@ -140,7 +140,7 @@ int main() {
 const GaussianConditional::shared_ptr& cg0 = linearBayesNet->back();
 assert(cg0->nrFrontals() == 1);
 assert(cg0->nrParents() == 0);
-linearFactorGraph->add(0, cg0->get_R(), cg0->get_d() - cg0->get_R()*result[ordering->at(x1)], noiseModel::Diagonal::Sigmas(cg0->get_sigmas(), true));
+linearFactorGraph->add(0, cg0->R(), cg0->d() - cg0->R()*result[ordering->at(x1)], noiseModel::Diagonal::Sigmas(cg0->get_sigmas(), true));
 // Create the desired ordering
 ordering = Ordering::shared_ptr(new Ordering);

gtsam.h

@@ -209,11 +209,60 @@ class KeyGroupMap {
 bool insert2(size_t key, int val);
 };
+// Actually a FastSet<FactorIndex>
+class FactorIndexSet {
+FactorIndexSet();
+FactorIndexSet(const gtsam::FactorIndexSet& set);
+// common STL methods
+size_t size() const;
+bool empty() const;
+void clear();
+// structure specific methods
+void insert(size_t factorIndex);
+bool erase(size_t factorIndex); // returns true if value was removed
+bool count(size_t factorIndex) const; // returns true if value exists
+};
+// Actually a vector<FactorIndex>
+class FactorIndices {
+FactorIndices();
+FactorIndices(const gtsam::FactorIndices& other);
+// common STL methods
+size_t size() const;
+bool empty() const;
+void clear();
+// structure specific methods
+size_t at(size_t i) const;
+size_t front() const;
+size_t back() const;
+void push_back(size_t factorIndex) const;
+};
 //*************************************************************************
 // base
 //*************************************************************************
 /** gtsam namespace functions */
+#include <gtsam/base/DSFMap.h>
+class IndexPair {
+IndexPair();
+IndexPair(size_t i, size_t j);
+size_t i() const;
+size_t j() const;
+};
+template<KEY = {gtsam::IndexPair}>
+class DSFMap {
+DSFMap();
+KEY find(const KEY& key) const;
+void merge(const KEY& x, const KEY& y);
+};
 #include <gtsam/base/Matrix.h>
 bool linear_independent(Matrix A, Matrix B, double tol);
@@ -583,8 +632,8 @@ class Pose2 {
 static Matrix wedge(double vx, double vy, double w);
 // Group Actions on Point2
-gtsam::Point2 transform_from(const gtsam::Point2& p) const;
-gtsam::Point2 transform_to(const gtsam::Point2& p) const;
+gtsam::Point2 transformFrom(const gtsam::Point2& p) const;
+gtsam::Point2 transformTo(const gtsam::Point2& p) const;
 // Standard Interface
 double x() const;
@@ -636,8 +685,8 @@ class Pose3 {
 static Matrix wedge(double wx, double wy, double wz, double vx, double vy, double vz);
 // Group Action on Point3
-gtsam::Point3 transform_from(const gtsam::Point3& point) const;
-gtsam::Point3 transform_to(const gtsam::Point3& point) const;
+gtsam::Point3 transformFrom(const gtsam::Point3& point) const;
+gtsam::Point3 transformTo(const gtsam::Point3& point) const;
 // Standard Interface
 gtsam::Rot3 rotation() const;
@@ -646,7 +695,8 @@ class Pose3 {
 double y() const;
 double z() const;
 Matrix matrix() const;
-gtsam::Pose3 transform_to(const gtsam::Pose3& pose) const; // FIXME: shadows other transform_to()
+gtsam::Pose3 transformPoseFrom(const gtsam::Pose3& pose) const;
+gtsam::Pose3 transformPoseTo(const gtsam::Pose3& pose) const;
 double range(const gtsam::Point3& point);
 double range(const gtsam::Pose3& pose);
@@ -1730,7 +1780,7 @@ virtual class SubgraphSolverParameters : gtsam::ConjugateGradientParameters {
 virtual class SubgraphSolver {
 SubgraphSolver(const gtsam::GaussianFactorGraph &A, const gtsam::SubgraphSolverParameters &parameters, const gtsam::Ordering& ordering);
-SubgraphSolver(const gtsam::GaussianFactorGraph &Ab1, const gtsam::GaussianFactorGraph &Ab2, const gtsam::SubgraphSolverParameters &parameters, const gtsam::Ordering& ordering);
+SubgraphSolver(const gtsam::GaussianFactorGraph &Ab1, const gtsam::GaussianFactorGraph* Ab2, const gtsam::SubgraphSolverParameters &parameters, const gtsam::Ordering& ordering);
 gtsam::VectorValues optimize() const;
 };
@@ -1867,7 +1917,6 @@ virtual class NonlinearFactor {
 #include <gtsam/nonlinear/NonlinearFactor.h>
 virtual class NoiseModelFactor: gtsam::NonlinearFactor {
 bool equals(const gtsam::NoiseModelFactor& other, double tol) const;
-gtsam::noiseModel::Base* get_noiseModel() const; // deprecated by below
 gtsam::noiseModel::Base* noiseModel() const;
 Vector unwhitenedError(const gtsam::Values& x) const;
 Vector whitenedError(const gtsam::Values& x) const;
@@ -2091,7 +2140,7 @@ virtual class NonlinearOptimizer {
 double error() const;
 int iterations() const;
 gtsam::Values values() const;
-void iterate() const;
+gtsam::GaussianFactorGraph* iterate() const;
 };
 #include <gtsam/nonlinear/GaussNewtonOptimizer.h>
@@ -2209,8 +2258,6 @@ class ISAM2Result {
 size_t getCliques() const;
 };
-class FactorIndices {};
 class ISAM2 {
 ISAM2();
 ISAM2(const gtsam::ISAM2Params& params);


@@ -101,7 +101,10 @@ message(STATUS "Building GTSAM - shared: ${BUILD_SHARED_LIBS}")
 # BUILD_SHARED_LIBS automatically defines static/shared libs:
 add_library(gtsam ${gtsam_srcs})
+# Boost:
 target_link_libraries(gtsam PUBLIC ${GTSAM_BOOST_LIBRARIES})
+target_include_directories(gtsam PUBLIC ${Boost_INCLUDE_DIR})
+# Other deps:
 target_link_libraries(gtsam PUBLIC ${GTSAM_ADDITIONAL_LIBRARIES})
 target_compile_definitions(gtsam PRIVATE ${GTSAM_COMPILE_DEFINITIONS_PRIVATE})
 target_compile_definitions(gtsam PUBLIC ${GTSAM_COMPILE_DEFINITIONS_PUBLIC})


@@ -18,9 +18,9 @@
 #pragma once
+#include <cstdlib> // Provides size_t
 #include <map>
 #include <set>
-#include <cstdlib> // Provides size_t
 namespace gtsam {
@@ -29,11 +29,9 @@ namespace gtsam {
 * Uses rank compression and union by rank, iterator version
 * @addtogroup base
 */
-template<class KEY>
+template <class KEY>
 class DSFMap {
 protected:
 /// We store the forest in an STL map, but parents are done with pointers
 struct Entry {
 typename std::map<KEY, Entry>::iterator parent_;
@@ -62,8 +60,7 @@
 iterator find_(const iterator& it) const {
 // follow parent pointers until we reach set representative
 iterator& parent = it->second.parent_;
-if (parent != it)
-parent = find_(parent); // not yet, recurse!
+if (parent != it) parent = find_(parent); // not yet, recurse!
 return parent;
 }
@@ -73,13 +70,11 @@
 return find_(initial);
 }
 public:
 typedef std::set<KEY> Set;
 /// constructor
-DSFMap() {
-}
+DSFMap() {}
 /// Given key, find the representative key for the set in which it lives
 inline KEY find(const KEY& key) const {
@@ -89,12 +84,10 @@ public:
 /// Merge two sets
 void merge(const KEY& x, const KEY& y) {
 // straight from http://en.wikipedia.org/wiki/Disjoint-set_data_structure
 iterator xRoot = find_(x);
 iterator yRoot = find_(y);
-if (xRoot == yRoot)
-return;
+if (xRoot == yRoot) return;
 // Merge sets
 if (xRoot->second.rank_ < yRoot->second.rank_)
@@ -117,7 +110,14 @@
 }
 return sets;
 }
 };
-}
+/// Small utility class for representing a wrappable pairs of ints.
+class IndexPair : public std::pair<size_t,size_t> {
+public:
+IndexPair(): std::pair<size_t,size_t>(0,0) {}
+IndexPair(size_t i, size_t j) : std::pair<size_t,size_t>(i,j) {}
+inline size_t i() const { return first; };
+inline size_t j() const { return second; };
+};
+} // namespace gtsam
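Not part of the patch, but a minimal sketch of how the new `IndexPair` key is meant to be used with `DSFMap` (the `gtsam::` names are taken from the header above; the rest is illustrative):
```cpp
#include <gtsam/base/DSFMap.h>
#include <cassert>

int main() {
  gtsam::DSFMap<gtsam::IndexPair> dsf;   // disjoint-set forest keyed on (i, j) pairs
  const gtsam::IndexPair a(1, 18), b(2, 2);
  dsf.merge(a, b);                       // union the two singleton sets
  assert(dsf.find(a) == dsf.find(b));    // both keys now share one representative
  return 0;
}
```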


@@ -65,8 +65,9 @@ Matrix SymmetricBlockMatrix::block(DenseIndex I, DenseIndex J) const {
 void SymmetricBlockMatrix::choleskyPartial(DenseIndex nFrontals) {
 gttic(VerticalBlockMatrix_choleskyPartial);
 DenseIndex topleft = variableColOffsets_[blockStart_];
-if (!gtsam::choleskyPartial(matrix_, offset(nFrontals) - topleft, topleft))
+if (!gtsam::choleskyPartial(matrix_, offset(nFrontals) - topleft, topleft)) {
 throw CholeskyFailed();
+}
 }
 /* ************************************************************************* */


@@ -67,28 +67,29 @@ struct FixedSizeMatrix {
 }
 /**
- * Numerically compute gradient of scalar function
+ * @brief Numerically compute gradient of scalar function
+ * @return n-dimensional gradient computed via central differencing
 * Class X is the input argument
 * The class X needs to have dim, expmap, logmap
+ * int N is the dimension of the X input value if variable dimension type but known at test time
 */
-template<class X>
-typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<double(const X&)> h, const X& x,
-double delta = 1e-5) {
+template <class X, int N = traits<X>::dimension>
+typename Eigen::Matrix<double, N, 1> numericalGradient(
+boost::function<double(const X&)> h, const X& x, double delta = 1e-5) {
 double factor = 1.0 / (2.0 * delta);
 BOOST_STATIC_ASSERT_MSG(
 (boost::is_base_of<manifold_tag, typename traits<X>::structure_category>::value),
 "Template argument X must be a manifold type.");
-static const int N = traits<X>::dimension;
-BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");
-typedef typename traits<X>::TangentVector TangentX;
+BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");
 // Prepare a tangent vector to perturb x with, only works for fixed size
-TangentX d;
+typename traits<X>::TangentVector d;
 d.setZero();
-Eigen::Matrix<double,N,1> g; g.setZero(); // Can be fixed size
+Eigen::Matrix<double,N,1> g;
+g.setZero();
 for (int j = 0; j < N; j++) {
 d(j) = delta;
 double hxplus = h(traits<X>::Retract(x, d));
@@ -108,37 +109,34 @@ typename internal::FixedSizeMatrix<X>::type numericalGradient(boost::function<do
 * @param delta increment for numerical derivative
 * Class Y is the output argument
 * Class X is the input argument
+ * int N is the dimension of the X input value if variable dimension type but known at test time
 * @return m*n Jacobian computed via central differencing
 */
-template<class Y, class X>
+template <class Y, class X, int N = traits<X>::dimension>
 // TODO Should compute fixed-size matrix
-typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::function<Y(const X&)> h, const X& x,
-double delta = 1e-5) {
+typename internal::FixedSizeMatrix<Y, X>::type numericalDerivative11(
+boost::function<Y(const X&)> h, const X& x, double delta = 1e-5) {
 typedef typename internal::FixedSizeMatrix<Y,X>::type Matrix;
 BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<Y>::structure_category>::value),
 "Template argument Y must be a manifold type.");
 typedef traits<Y> TraitsY;
-typedef typename TraitsY::TangentVector TangentY;
 BOOST_STATIC_ASSERT_MSG( (boost::is_base_of<gtsam::manifold_tag, typename traits<X>::structure_category>::value),
 "Template argument X must be a manifold type.");
-static const int N = traits<X>::dimension;
-BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type.");
+BOOST_STATIC_ASSERT_MSG(N>0, "Template argument X must be fixed-size type or N must be specified.");
 typedef traits<X> TraitsX;
-typedef typename TraitsX::TangentVector TangentX;
 // get value at x, and corresponding chart
 const Y hx = h(x);
 // Bit of a hack for now to find number of rows
-const TangentY zeroY = TraitsY::Local(hx, hx);
+const typename TraitsY::TangentVector zeroY = TraitsY::Local(hx, hx);
 const size_t m = zeroY.size();
 // Prepare a tangent vector to perturb x with, only works for fixed size
-TangentX dx;
+Eigen::Matrix<double, N, 1> dx;
 dx.setZero();
 // Fill in Jacobian H
@@ -146,9 +144,9 @@ typename internal::FixedSizeMatrix<Y,X>::type numericalDerivative11(boost::funct
 const double factor = 1.0 / (2.0 * delta);
 for (int j = 0; j < N; j++) {
 dx(j) = delta;
-const TangentY dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
+const auto dy1 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
 dx(j) = -delta;
-const TangentY dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
+const auto dy2 = TraitsY::Local(hx, h(TraitsX::Retract(x, dx)));
 dx(j) = 0;
 H.col(j) << (dy1 - dy2) * factor;
 }
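As a sketch of how the new `N` template parameter is intended to be used (not part of this patch; assumes the usual GTSAM headers and a fixed-size manifold type such as `Pose2`, for which `N` defaults to `traits<Pose2>::dimension == 3`):
```cpp
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Pose2.h>
#include <boost/function.hpp>
#include <iostream>

int main() {
  using namespace gtsam;
  // Scalar function of a Pose2: squared norm of its translation.
  boost::function<double(const Pose2&)> h = [](const Pose2& p) {
    return p.x() * p.x() + p.y() * p.y();
  };
  const Pose2 x(1.0, 2.0, 0.3);
  // Central-difference gradient in the 3-dimensional tangent space of Pose2.
  Eigen::Matrix<double, 3, 1> g = numericalGradient<Pose2>(h, x);
  std::cout << g.transpose() << std::endl;
  return 0;
}
```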


@@ -16,7 +16,7 @@
 * @brief unit tests for DSFMap
 */
-#include <gtsam_unstable/base/DSFMap.h>
+#include <gtsam/base/DSFMap.h>
 #include <boost/assign/std/list.hpp>
 #include <boost/assign/std/set.hpp>
@@ -115,7 +115,6 @@ TEST(DSFMap, mergePairwiseMatches2) {
 TEST(DSFMap, sets){
 // Create some "matches"
 typedef pair<size_t,size_t> Match;
-typedef pair<size_t, set<size_t> > key_pair;
 list<Match> matches;
 matches += Match(1,2), Match(2,3), Match(4,5), Match(4,6);
@@ -140,6 +139,12 @@ TEST(DSFMap, sets){
 EXPECT(s2 == sets[4]);
 }
+/* ************************************************************************* */
+TEST(DSFMap, findIndexPair) {
+DSFMap<IndexPair> dsf;
+EXPECT(dsf.find(IndexPair(1,2))==IndexPair(1,2));
+EXPECT(dsf.find(IndexPair(1,2)) != dsf.find(IndexPair(1,3)));
+}
 /* ************************************************************************* */
 int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
 /* ************************************************************************* */


@@ -56,6 +56,9 @@ namespace gtsam {
 /// Integer nonlinear key type
 typedef std::uint64_t Key;
+/// Integer nonlinear factor index type
+typedef std::uint64_t FactorIndex;
 /// The index type for Eigen objects
 typedef ptrdiff_t DenseIndex;


@@ -44,8 +44,7 @@ Cal3_S2::Cal3_S2(const std::string &path) :
 if (infile)
 infile >> fx_ >> fy_ >> s_ >> u0_ >> v0_;
 else {
-printf("Unable to load the calibration\n");
-exit(0);
+throw std::runtime_error("Cal3_S2: Unable to load the calibration");
 }
 infile.close();


@@ -107,7 +107,7 @@ Point2 PinholeBase::Project(const Unit3& pc, OptionalJacobian<2, 2> Dpoint) {
 /* ************************************************************************* */
 pair<Point2, bool> PinholeBase::projectSafe(const Point3& pw) const {
-const Point3 pc = pose().transform_to(pw);
+const Point3 pc = pose().transformTo(pw);
 const Point2 pn = Project(pc);
 return make_pair(pn, pc.z() > 0);
 }
@@ -116,8 +116,8 @@ pair<Point2, bool> PinholeBase::projectSafe(const Point3& pw) const {
 Point2 PinholeBase::project2(const Point3& point, OptionalJacobian<2, 6> Dpose,
 OptionalJacobian<2, 3> Dpoint) const {
-Matrix3 Rt; // calculated by transform_to if needed
-const Point3 q = pose().transform_to(point, boost::none, Dpoint ? &Rt : 0);
+Matrix3 Rt; // calculated by transformTo if needed
+const Point3 q = pose().transformTo(point, boost::none, Dpoint ? &Rt : 0);
 #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
 if (q.z() <= 0)
 throw CheiralityException();


@@ -359,7 +359,7 @@ public:
 Dresult_ddepth ? &Dpoint_ddepth : 0);
 Matrix33 Dresult_dpoint;
-const Point3 result = pose().transform_from(point, Dresult_dpose,
+const Point3 result = pose().transformFrom(point, Dresult_dpose,
 (Dresult_ddepth ||
 Dresult_dp) ? &Dresult_dpoint : 0);


@@ -52,13 +52,13 @@ void EssentialMatrix::print(const string& s) const {
 }
 /* ************************************************************************* */
-Point3 EssentialMatrix::transform_to(const Point3& p, OptionalJacobian<3, 5> DE,
+Point3 EssentialMatrix::transformTo(const Point3& p, OptionalJacobian<3, 5> DE,
 OptionalJacobian<3, 3> Dpoint) const {
 Pose3 pose(rotation(), direction().point3());
 Matrix36 DE_;
-Point3 q = pose.transform_to(p, DE ? &DE_ : 0, Dpoint);
+Point3 q = pose.transformTo(p, DE ? &DE_ : 0, Dpoint);
 if (DE) {
-// DE returned by pose.transform_to is 3*6, but we need it to be 3*5
+// DE returned by pose.transformTo is 3*6, but we need it to be 3*5
 // The last 3 columns are derivative with respect to change in translation
 // The derivative of translation with respect to a 2D sphere delta is 3*2 direction().basis()
 // Duy made an educated guess that this needs to be rotated to the local frame


@ -138,7 +138,7 @@ class GTSAM_EXPORT EssentialMatrix {
* @param Dpoint optional 3*3 Jacobian wrpt point * @param Dpoint optional 3*3 Jacobian wrpt point
* @return point in pose coordinates * @return point in pose coordinates
*/ */
Point3 transform_to(const Point3& p, Point3 transformTo(const Point3& p,
OptionalJacobian<3, 5> DE = boost::none, OptionalJacobian<3, 5> DE = boost::none,
OptionalJacobian<3, 3> Dpoint = boost::none) const; OptionalJacobian<3, 3> Dpoint = boost::none) const;
@ -176,6 +176,17 @@ class GTSAM_EXPORT EssentialMatrix {
/// @} /// @}
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point3 transform_to(const Point3& p,
OptionalJacobian<3, 5> DE = boost::none,
OptionalJacobian<3, 3> Dpoint = boost::none) const {
return transformTo(p, DE, Dpoint);
};
/// @}
#endif
private: private:
/// @name Advanced Interface /// @name Advanced Interface
/// @{ /// @{
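The block above keeps the old snake_case spelling compiling when GTSAM_ALLOW_DEPRECATED_SINCE_V4 is defined, forwarding to the new camelCase method. A minimal caller-side sketch of the migration (E, aRb and aTb are illustrative values, not taken from this diff):

    EssentialMatrix E(aRb, Unit3(aTb));   // hypothetical rotation and translation direction
    Point3 p(0.2, 0.7, -2.0);
    Point3 q = E.transformTo(p);          // preferred spelling after this change
    #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
    Point3 q_old = E.transform_to(p);     // still compiles, forwards to transformTo
    #endif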


@ -145,7 +145,7 @@ public:
(Dresult_dp || Dresult_dcal) ? &Dpoint_dpn : 0, (Dresult_dp || Dresult_dcal) ? &Dpoint_dpn : 0,
Dresult_ddepth ? &Dpoint_ddepth : 0); Dresult_ddepth ? &Dpoint_ddepth : 0);
Matrix33 Dresult_dpoint; Matrix33 Dresult_dpoint;
const Point3 result = pose().transform_from(point, Dresult_dpose, const Point3 result = pose().transformFrom(point, Dresult_dpose,
(Dresult_ddepth || (Dresult_ddepth ||
Dresult_dp || Dresult_dp ||
Dresult_dcal) ? &Dresult_dpoint : 0); Dresult_dcal) ? &Dresult_dpoint : 0);


@ -198,7 +198,7 @@ Pose2 Pose2::inverse() const {
/* ************************************************************************* */ /* ************************************************************************* */
// see doc/math.lyx, SE(2) section // see doc/math.lyx, SE(2) section
Point2 Pose2::transform_to(const Point2& point, Point2 Pose2::transformTo(const Point2& point,
OptionalJacobian<2, 3> Hpose, OptionalJacobian<2, 2> Hpoint) const { OptionalJacobian<2, 3> Hpose, OptionalJacobian<2, 2> Hpoint) const {
OptionalJacobian<2, 2> Htranslation = Hpose.cols<2>(0); OptionalJacobian<2, 2> Htranslation = Hpose.cols<2>(0);
OptionalJacobian<2, 1> Hrotation = Hpose.cols<1>(2); OptionalJacobian<2, 1> Hrotation = Hpose.cols<1>(2);
@ -209,7 +209,7 @@ Point2 Pose2::transform_to(const Point2& point,
/* ************************************************************************* */ /* ************************************************************************* */
// see doc/math.lyx, SE(2) section // see doc/math.lyx, SE(2) section
Point2 Pose2::transform_from(const Point2& point, Point2 Pose2::transformFrom(const Point2& point,
OptionalJacobian<2, 3> Hpose, OptionalJacobian<2, 2> Hpoint) const { OptionalJacobian<2, 3> Hpose, OptionalJacobian<2, 2> Hpoint) const {
OptionalJacobian<2, 2> Htranslation = Hpose.cols<2>(0); OptionalJacobian<2, 2> Htranslation = Hpose.cols<2>(0);
OptionalJacobian<2, 1> Hrotation = Hpose.cols<1>(2); OptionalJacobian<2, 1> Hrotation = Hpose.cols<1>(2);
@ -223,7 +223,7 @@ Rot2 Pose2::bearing(const Point2& point,
OptionalJacobian<1, 3> Hpose, OptionalJacobian<1, 2> Hpoint) const { OptionalJacobian<1, 3> Hpose, OptionalJacobian<1, 2> Hpoint) const {
// make temporary matrices // make temporary matrices
Matrix23 D_d_pose; Matrix2 D_d_point; Matrix23 D_d_pose; Matrix2 D_d_point;
Point2 d = transform_to(point, Hpose ? &D_d_pose : 0, Hpoint ? &D_d_point : 0); Point2 d = transformTo(point, Hpose ? &D_d_pose : 0, Hpoint ? &D_d_point : 0);
if (!Hpose && !Hpoint) return Rot2::relativeBearing(d); if (!Hpose && !Hpoint) return Rot2::relativeBearing(d);
Matrix12 D_result_d; Matrix12 D_result_d;
Rot2 result = Rot2::relativeBearing(d, Hpose || Hpoint ? &D_result_d : 0); Rot2 result = Rot2::relativeBearing(d, Hpose || Hpoint ? &D_result_d : 0);
@ -288,7 +288,7 @@ double Pose2::range(const Pose2& pose,
/* ************************************************************************* /* *************************************************************************
* New explanation, from scan.ml * New explanation, from scan.ml
* It finds the angle using a linear method: * It finds the angle using a linear method:
* q = Pose2::transform_from(p) = t + R*p * q = Pose2::transformFrom(p) = t + R*p
* We need to remove the centroids from the data to find the rotation * We need to remove the centroids from the data to find the rotation
* using dp=[dpx;dpy] and q=[dqx;dqy] we have * using dp=[dpx;dpy] and q=[dqx;dqy] we have
* |dqx| |c -s| |dpx| |dpx -dpy| |c| * |dqx| |c -s| |dpx| |dpx -dpy| |c|
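Spelled out, the linear method the comment describes stacks one such row pair per centered correspondence and solves a small least-squares problem for c = cos(theta) and s = sin(theta); the translation then follows from the centroids. A hedged reconstruction of the relation the truncated ASCII matrix above is showing:

    \begin{pmatrix} dq_x \\ dq_y \end{pmatrix}
      = \begin{pmatrix} c & -s \\ s & c \end{pmatrix} \begin{pmatrix} dp_x \\ dp_y \end{pmatrix}
      = \begin{pmatrix} dp_x & -dp_y \\ dp_y & dp_x \end{pmatrix} \begin{pmatrix} c \\ s \end{pmatrix}
      = H_i \begin{pmatrix} c \\ s \end{pmatrix}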


@ -195,17 +195,17 @@ public:
/// @{ /// @{
/** Return point coordinates in pose coordinate frame */ /** Return point coordinates in pose coordinate frame */
Point2 transform_to(const Point2& point, Point2 transformTo(const Point2& point,
OptionalJacobian<2, 3> H1 = boost::none, OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> H2 = boost::none) const; OptionalJacobian<2, 2> Dpoint = boost::none) const;
/** Return point coordinates in global frame */ /** Return point coordinates in global frame */
Point2 transform_from(const Point2& point, Point2 transformFrom(const Point2& point,
OptionalJacobian<2, 3> H1 = boost::none, OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> H2 = boost::none) const; OptionalJacobian<2, 2> Dpoint = boost::none) const;
/** syntactic sugar for transform_from */ /** syntactic sugar for transformFrom */
inline Point2 operator*(const Point2& point) const { return transform_from(point);} inline Point2 operator*(const Point2& point) const { return transformFrom(point);}
/// @} /// @}
/// @name Standard Interface /// @name Standard Interface
@ -289,7 +289,23 @@ public:
/// @} /// @}
private: #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point2 transform_from(const Point2& point,
OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> Dpoint = boost::none) const {
return transformFrom(point, Dpose, Dpoint);
}
Point2 transform_to(const Point2& point,
OptionalJacobian<2, 3> Dpose = boost::none,
OptionalJacobian<2, 2> Dpoint = boost::none) const {
return transformTo(point, Dpose, Dpoint);
}
/// @}
#endif
private:
// Serialization function // Serialization function
friend class boost::serialization::access; friend class boost::serialization::access;
@ -313,7 +329,7 @@ inline Matrix wedge<Pose2>(const Vector& xi) {
/** /**
* Calculate pose between a vector of 2D point correspondences (p,q) * Calculate pose between a vector of 2D point correspondences (p,q)
* where q = Pose2::transform_from(p) = t + R*p * where q = Pose2::transformFrom(p) = t + R*p
*/ */
typedef std::pair<Point2,Point2> Point2Pair; typedef std::pair<Point2,Point2> Point2Pair;
GTSAM_EXPORT boost::optional<Pose2> align(const std::vector<Point2Pair>& pairs); GTSAM_EXPORT boost::optional<Pose2> align(const std::vector<Point2Pair>& pairs);
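A minimal sketch of the convention documented above: build (p, q) pairs with transformFrom and recover the pose with align (the pose and points are illustrative):

    Pose2 truth(Rot2::fromAngle(M_PI / 4.0), Point2(1.0, 2.0));    // hypothetical ground truth
    std::vector<Point2Pair> pairs;
    for (const Point2& p : {Point2(0, 0), Point2(10, 0), Point2(10, 10)})
      pairs.push_back(std::make_pair(p, truth.transformFrom(p)));  // q = t + R*p
    boost::optional<Pose2> estimated = align(pairs);               // should recover `truth`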


@ -285,22 +285,31 @@ Matrix4 Pose3::matrix() const {
} }
/* ************************************************************************* */ /* ************************************************************************* */
Pose3 Pose3::transformPoseFrom(const Pose3& aTb) const {
const Pose3& wTa = *this;
return wTa * aTb;
}
/* ************************************************************************* */
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
Pose3 Pose3::transform_to(const Pose3& pose) const { Pose3 Pose3::transform_to(const Pose3& pose) const {
Rot3 cRv = R_ * Rot3(pose.R_.inverse()); Rot3 cRv = R_ * Rot3(pose.R_.inverse());
Point3 t = pose.transform_to(t_); Point3 t = pose.transform_to(t_);
return Pose3(cRv, t); return Pose3(cRv, t);
} }
#endif
/* ************************************************************************* */ /* ************************************************************************* */
Pose3 Pose3::transform_pose_to(const Pose3& pose, OptionalJacobian<6, 6> H1, Pose3 Pose3::transformPoseTo(const Pose3& wTb, OptionalJacobian<6, 6> H1,
OptionalJacobian<6, 6> H2) const { OptionalJacobian<6, 6> H2) const {
if (H1) *H1 = -pose.inverse().AdjointMap() * AdjointMap(); if (H1) *H1 = -wTb.inverse().AdjointMap() * AdjointMap();
if (H2) *H2 = I_6x6; if (H2) *H2 = I_6x6;
return inverse() * pose; const Pose3& wTa = *this;
return wTa.inverse() * wTb;
} }
/* ************************************************************************* */ /* ************************************************************************* */
Point3 Pose3::transform_from(const Point3& p, OptionalJacobian<3,6> Dpose, Point3 Pose3::transformFrom(const Point3& p, OptionalJacobian<3,6> Dpose,
OptionalJacobian<3,3> Dpoint) const { OptionalJacobian<3,3> Dpoint) const {
// Only get matrix once, to avoid multiple allocations, // Only get matrix once, to avoid multiple allocations,
// as well as multiple conversions in the Quaternion case // as well as multiple conversions in the Quaternion case
@ -316,7 +325,7 @@ Point3 Pose3::transform_from(const Point3& p, OptionalJacobian<3,6> Dpose,
} }
/* ************************************************************************* */ /* ************************************************************************* */
Point3 Pose3::transform_to(const Point3& p, OptionalJacobian<3,6> Dpose, Point3 Pose3::transformTo(const Point3& p, OptionalJacobian<3,6> Dpose,
OptionalJacobian<3,3> Dpoint) const { OptionalJacobian<3,3> Dpoint) const {
// Only get transpose once, to avoid multiple allocations, // Only get transpose once, to avoid multiple allocations,
// as well as multiple conversions in the Quaternion case // as well as multiple conversions in the Quaternion case
@ -340,7 +349,7 @@ double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
OptionalJacobian<1, 3> H2) const { OptionalJacobian<1, 3> H2) const {
Matrix36 D_local_pose; Matrix36 D_local_pose;
Matrix3 D_local_point; Matrix3 D_local_point;
Point3 local = transform_to(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0); Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
if (!H1 && !H2) { if (!H1 && !H2) {
return local.norm(); return local.norm();
} else { } else {
@ -366,7 +375,7 @@ Unit3 Pose3::bearing(const Point3& point, OptionalJacobian<2, 6> H1,
OptionalJacobian<2, 3> H2) const { OptionalJacobian<2, 3> H2) const {
Matrix36 D_local_pose; Matrix36 D_local_pose;
Matrix3 D_local_point; Matrix3 D_local_point;
Point3 local = transform_to(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0); Point3 local = transformTo(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
if (!H1 && !H2) { if (!H1 && !H2) {
return Unit3(local); return Unit3(local);
} else { } else {
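A hedged sketch of the frame bookkeeping behind the new names above: transformPoseFrom composes a pose expressed in this frame into the world frame, and transformPoseTo undoes that composition (wTa and aTb are illustrative poses):

    Pose3 wTa(Rot3::Yaw(0.3), Point3(1, 2, 3));      // frame a expressed in the world
    Pose3 aTb(Rot3::Roll(-0.1), Point3(0.5, 0, 1));  // frame b expressed in a
    Pose3 wTb      = wTa.transformPoseFrom(aTb);     // == wTa * aTb, b in the world
    Pose3 aTb_back = wTa.transformPoseTo(wTb);       // == wTa.inverse() * wTb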


@ -81,7 +81,6 @@ public:
/** /**
* Create Pose3 by aligning two point pairs * Create Pose3 by aligning two point pairs
* A pose aTb is estimated between pairs (a_point, b_point) such that a_point = aTb * b_point * A pose aTb is estimated between pairs (a_point, b_point) such that a_point = aTb * b_point
* Meant to replace the deprecated function 'align', which orders the pairs the opposite way.
* Note this allows for noise on the points but in that case the mapping will not be exact. * Note this allows for noise on the points but in that case the mapping will not be exact.
*/ */
static boost::optional<Pose3> Align(const std::vector<Point3Pair>& abPointPairs); static boost::optional<Pose3> Align(const std::vector<Point3Pair>& abPointPairs);
@ -207,12 +206,12 @@ public:
* @param Dpoint optional 3*3 Jacobian wrpt point * @param Dpoint optional 3*3 Jacobian wrpt point
* @return point in world coordinates * @return point in world coordinates
*/ */
Point3 transform_from(const Point3& p, OptionalJacobian<3, 6> Dpose = Point3 transformFrom(const Point3& p, OptionalJacobian<3, 6> Dpose =
boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const; boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
/** syntactic sugar for transform_from */ /** syntactic sugar for transformFrom */
inline Point3 operator*(const Point3& p) const { inline Point3 operator*(const Point3& p) const {
return transform_from(p); return transformFrom(p);
} }
/** /**
@ -222,7 +221,7 @@ public:
* @param Dpoint optional 3*3 Jacobian wrpt point * @param Dpoint optional 3*3 Jacobian wrpt point
* @return point in Pose coordinates * @return point in Pose coordinates
*/ */
Point3 transform_to(const Point3& p, OptionalJacobian<3, 6> Dpose = Point3 transformTo(const Point3& p, OptionalJacobian<3, 6> Dpose =
boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const; boost::none, OptionalJacobian<3, 3> Dpoint = boost::none) const;
/// @} /// @}
@ -253,13 +252,11 @@ public:
/** convert to 4*4 matrix */ /** convert to 4*4 matrix */
Matrix4 matrix() const; Matrix4 matrix() const;
/** receives a pose in local coordinates and transforms it to world coordinates /** receives a pose in local coordinates and transforms it to world coordinates */
* @deprecated: This is actually equivalent to transform_from, so it is WRONG! Use Pose3 transformPoseFrom(const Pose3& pose) const;
* transform_pose_to instead. */
Pose3 transform_to(const Pose3& pose) const;
/** receives a pose in world coordinates and transforms it to local coordinates */ /** receives a pose in world coordinates and transforms it to local coordinates */
Pose3 transform_pose_to(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none, Pose3 transformPoseTo(const Pose3& pose, OptionalJacobian<6, 6> H1 = boost::none,
OptionalJacobian<6, 6> H2 = boost::none) const; OptionalJacobian<6, 6> H2 = boost::none) const;
/** /**
@ -321,6 +318,30 @@ public:
GTSAM_EXPORT GTSAM_EXPORT
friend std::ostream &operator<<(std::ostream &os, const Pose3& p); friend std::ostream &operator<<(std::ostream &os, const Pose3& p);
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
Point3 transform_from(const Point3& p,
OptionalJacobian<3, 6> Dpose = boost::none,
OptionalJacobian<3, 3> Dpoint = boost::none) const {
return transformFrom(p, Dpose, Dpoint);
}
Point3 transform_to(const Point3& p,
OptionalJacobian<3, 6> Dpose = boost::none,
OptionalJacobian<3, 3> Dpoint = boost::none) const {
return transformTo(p, Dpose, Dpoint);
}
Pose3 transform_pose_to(const Pose3& pose,
OptionalJacobian<6, 6> H1 = boost::none,
OptionalJacobian<6, 6> H2 = boost::none) const {
return transformPoseTo(pose, H1, H2);
}
/**
* @deprecated: this function is neither here nor there. */
Pose3 transform_to(const Pose3& pose) const;
/// @}
#endif
private: private:
/** Serialization function */ /** Serialization function */
friend class boost::serialization::access; friend class boost::serialization::access;
@ -351,12 +372,10 @@ inline Matrix wedge<Pose3>(const Vector& xi) {
return Pose3::wedge(xi(0), xi(1), xi(2), xi(3), xi(4), xi(5)); return Pose3::wedge(xi(0), xi(1), xi(2), xi(3), xi(4), xi(5));
} }
/** #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
* Calculate pose between a vector of 3D point correspondences (b_point, a_point) // deprecated: use Pose3::Align with point pairs ordered the opposite way
* where a_point = Pose3::transform_from(b_point) = t + R*b_point
* @deprecated: use Pose3::Align with point pairs ordered the opposite way
*/
GTSAM_EXPORT boost::optional<Pose3> align(const std::vector<Point3Pair>& baPointPairs); GTSAM_EXPORT boost::optional<Pose3> align(const std::vector<Point3Pair>& baPointPairs);
#endif
// For MATLAB wrapper // For MATLAB wrapper
typedef std::vector<Pose3> Pose3Vector; typedef std::vector<Pose3> Pose3Vector;
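A minimal sketch of Pose3::Align under the (a_point, b_point) pair ordering documented earlier in this file (aTb_true and the sample points are illustrative):

    Pose3 aTb_true(Rot3::Rodrigues(0, 0, 0.2), Point3(1, 2, 3));
    std::vector<Point3Pair> abPairs;
    for (const Point3& b : {Point3(0, 0, 1), Point3(10, 0, 2), Point3(20, -10, 30)})
      abPairs.push_back(std::make_pair(aTb_true * b, b));          // a_point = aTb * b_point
    boost::optional<Pose3> aTb_estimated = Pose3::Align(abPairs);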


@ -31,7 +31,6 @@ namespace so3 {
void ExpmapFunctor::init(bool nearZeroApprox) { void ExpmapFunctor::init(bool nearZeroApprox) {
nearZero = nearZeroApprox || (theta2 <= std::numeric_limits<double>::epsilon()); nearZero = nearZeroApprox || (theta2 <= std::numeric_limits<double>::epsilon());
if (!nearZero) { if (!nearZero) {
theta = std::sqrt(theta2); // rotation angle
sin_theta = std::sin(theta); sin_theta = std::sin(theta);
const double s2 = std::sin(theta / 2.0); const double s2 = std::sin(theta / 2.0);
one_minus_cos = 2.0 * s2 * s2; // numerically better than [1 - cos(theta)] one_minus_cos = 2.0 * s2 * s2; // numerically better than [1 - cos(theta)]
@ -39,7 +38,7 @@ void ExpmapFunctor::init(bool nearZeroApprox) {
} }
ExpmapFunctor::ExpmapFunctor(const Vector3& omega, bool nearZeroApprox) ExpmapFunctor::ExpmapFunctor(const Vector3& omega, bool nearZeroApprox)
: theta2(omega.dot(omega)) { : theta2(omega.dot(omega)), theta(std::sqrt(theta2)) {
const double wx = omega.x(), wy = omega.y(), wz = omega.z(); const double wx = omega.x(), wy = omega.y(), wz = omega.z();
W << 0.0, -wz, +wy, +wz, 0.0, -wx, -wy, +wx, 0.0; W << 0.0, -wz, +wy, +wz, 0.0, -wx, -wy, +wx, 0.0;
init(nearZeroApprox); init(nearZeroApprox);
@ -50,7 +49,7 @@ ExpmapFunctor::ExpmapFunctor(const Vector3& omega, bool nearZeroApprox)
} }
ExpmapFunctor::ExpmapFunctor(const Vector3& axis, double angle, bool nearZeroApprox) ExpmapFunctor::ExpmapFunctor(const Vector3& axis, double angle, bool nearZeroApprox)
: theta2(angle * angle) { : theta2(angle * angle), theta(angle) {
const double ax = axis.x(), ay = axis.y(), az = axis.z(); const double ax = axis.x(), ay = axis.y(), az = axis.z();
K << 0.0, -az, +ay, +az, 0.0, -ax, -ay, +ax, 0.0; K << 0.0, -az, +ay, +az, 0.0, -ax, -ay, +ax, 0.0;
W = K * angle; W = K * angle;
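The change above moves the square root into the constructor initializer lists, so theta is valid before init() runs; previously it was only computed inside the !nearZero branch. A hedged sketch of the initialization-order idea, independent of GTSAM:

    struct Functor {
      double theta2, theta;                  // members initialize in declaration order
      explicit Functor(double angle)
          : theta2(angle * angle), theta(angle) {
        // setup work here (like ExpmapFunctor::init) can already rely on theta
      }
    };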


@ -37,7 +37,7 @@ namespace gtsam {
StereoPoint2 StereoCamera::project2(const Point3& point, StereoPoint2 StereoCamera::project2(const Point3& point,
OptionalJacobian<3,6> H1, OptionalJacobian<3,3> H2) const { OptionalJacobian<3,6> H1, OptionalJacobian<3,3> H2) const {
const Point3 q = leftCamPose_.transform_to(point); const Point3 q = leftCamPose_.transformTo(point);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
if (q.z() <= 0) if (q.z() <= 0)
@ -94,7 +94,7 @@ namespace gtsam {
double Z = K_->baseline() * K_->fx() / (measured[0] - measured[1]); double Z = K_->baseline() * K_->fx() / (measured[0] - measured[1]);
double X = Z * (measured[0] - K_->px()) / K_->fx(); double X = Z * (measured[0] - K_->px()) / K_->fx();
double Y = Z * (measured[2] - K_->py()) / K_->fy(); double Y = Z * (measured[2] - K_->py()) / K_->fy();
Point3 point = leftCamPose_.transform_from(Point3(X, Y, Z)); Point3 point = leftCamPose_.transformFrom(Point3(X, Y, Z));
return point; return point;
} }
@ -120,7 +120,7 @@ namespace gtsam {
-z_partial_uR, z_partial_uR, 0; -z_partial_uR, z_partial_uR, 0;
Matrix3 D_point_local; Matrix3 D_point_local;
const Point3 point = leftCamPose_.transform_from(local, H1, D_point_local); const Point3 point = leftCamPose_.transformFrom(local, H1, D_point_local);
if(H2) { if(H2) {
*H2 = D_point_local * D_local_z; *H2 = D_point_local * D_local_z;
@ -129,7 +129,7 @@ namespace gtsam {
return point; return point;
} }
return leftCamPose_.transform_from(local); return leftCamPose_.transformFrom(local);
} }
} }
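The backproject code above inverts the rectified stereo model: disparity gives depth, the left-camera intrinsics give X and Y, and the result is mapped into the world frame. A hedged numeric sketch (K and leftCamPose are illustrative stand-ins for the stereo calibration and the left camera pose):

    double uL = 320.0, uR = 300.0, v = 240.0;        // hypothetical StereoPoint2 measurement
    double Z = K->baseline() * K->fx() / (uL - uR);  // depth from disparity
    double X = Z * (uL - K->px()) / K->fx();
    double Y = Z * (v - K->py()) / K->fy();
    Point3 world = leftCamPose.transformFrom(Point3(X, Y, Z));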


@ -134,7 +134,7 @@ TEST( CalibratedCamera, Dproject_point_pose)
Point2 result = camera.project(point1, Dpose, Dpoint); Point2 result = camera.project(point1, Dpose, Dpoint);
Matrix numerical_pose = numericalDerivative21(project2, camera, point1); Matrix numerical_pose = numericalDerivative21(project2, camera, point1);
Matrix numerical_point = numericalDerivative22(project2, camera, point1); Matrix numerical_point = numericalDerivative22(project2, camera, point1);
CHECK(assert_equal(Point3(-0.08, 0.08, 0.5), camera.pose().transform_to(point1))); CHECK(assert_equal(Point3(-0.08, 0.08, 0.5), camera.pose().transformTo(point1)));
CHECK(assert_equal(Point2(-.16, .16), result)); CHECK(assert_equal(Point2(-.16, .16), result));
CHECK(assert_equal(numerical_pose, Dpose, 1e-7)); CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
CHECK(assert_equal(numerical_point, Dpoint, 1e-7)); CHECK(assert_equal(numerical_point, Dpoint, 1e-7));


@ -115,9 +115,9 @@ TEST (EssentialMatrix, RoundTrip) {
//************************************************************************* //*************************************************************************
Point3 transform_to_(const EssentialMatrix& E, const Point3& point) { Point3 transform_to_(const EssentialMatrix& E, const Point3& point) {
return E.transform_to(point); return E.transformTo(point);
} }
TEST (EssentialMatrix, transform_to) { TEST (EssentialMatrix, transformTo) {
// test with a more complicated EssentialMatrix // test with a more complicated EssentialMatrix
Rot3 aRb2 = Rot3::Yaw(M_PI / 3.0) * Rot3::Pitch(M_PI_4) Rot3 aRb2 = Rot3::Yaw(M_PI / 3.0) * Rot3::Pitch(M_PI_4)
* Rot3::Roll(M_PI / 6.0); * Rot3::Roll(M_PI / 6.0);
@ -126,7 +126,7 @@ TEST (EssentialMatrix, transform_to) {
//EssentialMatrix E(aRb, Unit3(aTb).retract(Vector2(0.1, 0))); //EssentialMatrix E(aRb, Unit3(aTb).retract(Vector2(0.1, 0)));
static Point3 P(0.2, 0.7, -2); static Point3 P(0.2, 0.7, -2);
Matrix actH1, actH2; Matrix actH1, actH2;
E.transform_to(P, actH1, actH2); E.transformTo(P, actH1, actH2);
Matrix expH1 = numericalDerivative21(transform_to_, E, P), // Matrix expH1 = numericalDerivative21(transform_to_, E, P), //
expH2 = numericalDerivative22(transform_to_, E, P); expH2 = numericalDerivative22(transform_to_, E, P);
EXPECT(assert_equal(expH1, actH1, 1e-8)); EXPECT(assert_equal(expH1, actH1, 1e-8));


@ -266,45 +266,44 @@ TEST( Pose2, LogmapDerivative2) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
static Point2 transform_to_proxy(const Pose2& pose, const Point2& point) { static Point2 transformTo_(const Pose2& pose, const Point2& point) {
return pose.transform_to(point); return pose.transformTo(point);
} }
TEST( Pose2, transform_to ) TEST(Pose2, transformTo) {
{ Pose2 pose(M_PI / 2.0, Point2(1, 2)); // robot at (1,2) looking towards y
Pose2 pose(M_PI/2.0, Point2(1,2)); // robot at (1,2) looking towards y Point2 point(-1, 4); // landmark at (-1,4)
Point2 point(-1,4); // landmark at (-1,4)
// expected // expected
Point2 expected(2,2); Point2 expected(2, 2);
Matrix expectedH1 = (Matrix(2,3) << -1.0, 0.0, 2.0, 0.0, -1.0, -2.0).finished(); Matrix expectedH1 =
Matrix expectedH2 = (Matrix(2,2) << 0.0, 1.0, -1.0, 0.0).finished(); (Matrix(2, 3) << -1.0, 0.0, 2.0, 0.0, -1.0, -2.0).finished();
Matrix expectedH2 = (Matrix(2, 2) << 0.0, 1.0, -1.0, 0.0).finished();
// actual // actual
Matrix actualH1, actualH2; Matrix actualH1, actualH2;
Point2 actual = pose.transform_to(point, actualH1, actualH2); Point2 actual = pose.transformTo(point, actualH1, actualH2);
EXPECT(assert_equal(expected,actual)); EXPECT(assert_equal(expected, actual));
EXPECT(assert_equal(expectedH1,actualH1)); EXPECT(assert_equal(expectedH1, actualH1));
Matrix numericalH1 = numericalDerivative21(transform_to_proxy, pose, point); Matrix numericalH1 = numericalDerivative21(transformTo_, pose, point);
EXPECT(assert_equal(numericalH1,actualH1)); EXPECT(assert_equal(numericalH1, actualH1));
EXPECT(assert_equal(expectedH2,actualH2)); EXPECT(assert_equal(expectedH2, actualH2));
Matrix numericalH2 = numericalDerivative22(transform_to_proxy, pose, point); Matrix numericalH2 = numericalDerivative22(transformTo_, pose, point);
EXPECT(assert_equal(numericalH2,actualH2)); EXPECT(assert_equal(numericalH2, actualH2));
} }
/* ************************************************************************* */ /* ************************************************************************* */
static Point2 transform_from_proxy(const Pose2& pose, const Point2& point) { static Point2 transformFrom_(const Pose2& pose, const Point2& point) {
return pose.transform_from(point); return pose.transformFrom(point);
} }
TEST (Pose2, transform_from) TEST(Pose2, transformFrom) {
{ Pose2 pose(1., 0., M_PI / 2.0);
Pose2 pose(1., 0., M_PI/2.0);
Point2 pt(2., 1.); Point2 pt(2., 1.);
Matrix H1, H2; Matrix H1, H2;
Point2 actual = pose.transform_from(pt, H1, H2); Point2 actual = pose.transformFrom(pt, H1, H2);
Point2 expected(0., 2.); Point2 expected(0., 2.);
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
@ -312,11 +311,11 @@ TEST (Pose2, transform_from)
Matrix H1_expected = (Matrix(2, 3) << 0., -1., -2., 1., 0., -1.).finished(); Matrix H1_expected = (Matrix(2, 3) << 0., -1., -2., 1., 0., -1.).finished();
Matrix H2_expected = (Matrix(2, 2) << 0., -1., 1., 0.).finished(); Matrix H2_expected = (Matrix(2, 2) << 0., -1., 1., 0.).finished();
Matrix numericalH1 = numericalDerivative21(transform_from_proxy, pose, pt); Matrix numericalH1 = numericalDerivative21(transformFrom_, pose, pt);
EXPECT(assert_equal(H1_expected, H1)); EXPECT(assert_equal(H1_expected, H1));
EXPECT(assert_equal(H1_expected, numericalH1)); EXPECT(assert_equal(H1_expected, numericalH1));
Matrix numericalH2 = numericalDerivative22(transform_from_proxy, pose, pt); Matrix numericalH2 = numericalDerivative22(transformFrom_, pose, pt);
EXPECT(assert_equal(H2_expected, H2)); EXPECT(assert_equal(H2_expected, H2));
EXPECT(assert_equal(H2_expected, numericalH2)); EXPECT(assert_equal(H2_expected, numericalH2));
} }
@ -349,8 +348,8 @@ TEST(Pose2, compose_a)
Point2 point(sqrt(0.5), 3.0*sqrt(0.5)); Point2 point(sqrt(0.5), 3.0*sqrt(0.5));
Point2 expected_point(-1.0, -1.0); Point2 expected_point(-1.0, -1.0);
Point2 actual_point1 = (pose1 * pose2).transform_to(point); Point2 actual_point1 = (pose1 * pose2).transformTo(point);
Point2 actual_point2 = pose2.transform_to(pose1.transform_to(point)); Point2 actual_point2 = pose2.transformTo(pose1.transformTo(point));
EXPECT(assert_equal(expected_point, actual_point1)); EXPECT(assert_equal(expected_point, actual_point1));
EXPECT(assert_equal(expected_point, actual_point2)); EXPECT(assert_equal(expected_point, actual_point2));
} }
@ -735,7 +734,7 @@ TEST(Pose2, align_2) {
vector<Point2Pair> correspondences; vector<Point2Pair> correspondences;
Point2 p1(0,0), p2(10,0); Point2 p1(0,0), p2(10,0);
Point2 q1 = expected.transform_from(p1), q2 = expected.transform_from(p2); Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2);
EXPECT(assert_equal(Point2(20,10),q1)); EXPECT(assert_equal(Point2(20,10),q1));
EXPECT(assert_equal(Point2(20,20),q2)); EXPECT(assert_equal(Point2(20,20),q2));
Point2Pair pq1(make_pair(p1, q1)); Point2Pair pq1(make_pair(p1, q1));
@ -750,7 +749,7 @@ namespace align_3 {
Point2 t(10,10); Point2 t(10,10);
Pose2 expected(Rot2::fromAngle(2*M_PI/3), t); Pose2 expected(Rot2::fromAngle(2*M_PI/3), t);
Point2 p1(0,0), p2(10,0), p3(10,10); Point2 p1(0,0), p2(10,0), p3(10,10);
Point2 q1 = expected.transform_from(p1), q2 = expected.transform_from(p2), q3 = expected.transform_from(p3); Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2), q3 = expected.transformFrom(p3);
} }
TEST(Pose2, align_3) { TEST(Pose2, align_3) {


@ -316,226 +316,164 @@ TEST( Pose3, compose_inverse)
} }
/* ************************************************************************* */ /* ************************************************************************* */
Point3 transform_from_(const Pose3& pose, const Point3& point) { return pose.transform_from(point); } Point3 transformFrom_(const Pose3& pose, const Point3& point) {
TEST( Pose3, Dtransform_from1_a) return pose.transformFrom(point);
{ }
TEST(Pose3, Dtransform_from1_a) {
Matrix actualDtransform_from1; Matrix actualDtransform_from1;
T.transform_from(P, actualDtransform_from1, boost::none); T.transformFrom(P, actualDtransform_from1, boost::none);
Matrix numerical = numericalDerivative21(transform_from_,T,P); Matrix numerical = numericalDerivative21(transformFrom_, T, P);
EXPECT(assert_equal(numerical,actualDtransform_from1,1e-8)); EXPECT(assert_equal(numerical, actualDtransform_from1, 1e-8));
} }
TEST( Pose3, Dtransform_from1_b) TEST(Pose3, Dtransform_from1_b) {
{
Pose3 origin; Pose3 origin;
Matrix actualDtransform_from1; Matrix actualDtransform_from1;
origin.transform_from(P, actualDtransform_from1, boost::none); origin.transformFrom(P, actualDtransform_from1, boost::none);
Matrix numerical = numericalDerivative21(transform_from_,origin,P); Matrix numerical = numericalDerivative21(transformFrom_, origin, P);
EXPECT(assert_equal(numerical,actualDtransform_from1,1e-8)); EXPECT(assert_equal(numerical, actualDtransform_from1, 1e-8));
} }
TEST( Pose3, Dtransform_from1_c) TEST(Pose3, Dtransform_from1_c) {
{ Point3 origin(0, 0, 0);
Point3 origin(0,0,0); Pose3 T0(R, origin);
Pose3 T0(R,origin);
Matrix actualDtransform_from1; Matrix actualDtransform_from1;
T0.transform_from(P, actualDtransform_from1, boost::none); T0.transformFrom(P, actualDtransform_from1, boost::none);
Matrix numerical = numericalDerivative21(transform_from_,T0,P); Matrix numerical = numericalDerivative21(transformFrom_, T0, P);
EXPECT(assert_equal(numerical,actualDtransform_from1,1e-8)); EXPECT(assert_equal(numerical, actualDtransform_from1, 1e-8));
} }
TEST( Pose3, Dtransform_from1_d) TEST(Pose3, Dtransform_from1_d) {
{
Rot3 I; Rot3 I;
Point3 t0(100,0,0); Point3 t0(100, 0, 0);
Pose3 T0(I,t0); Pose3 T0(I, t0);
Matrix actualDtransform_from1; Matrix actualDtransform_from1;
T0.transform_from(P, actualDtransform_from1, boost::none); T0.transformFrom(P, actualDtransform_from1, boost::none);
//print(computed, "Dtransform_from1_d computed:"); // print(computed, "Dtransform_from1_d computed:");
Matrix numerical = numericalDerivative21(transform_from_,T0,P); Matrix numerical = numericalDerivative21(transformFrom_, T0, P);
//print(numerical, "Dtransform_from1_d numerical:"); // print(numerical, "Dtransform_from1_d numerical:");
EXPECT(assert_equal(numerical,actualDtransform_from1,1e-8)); EXPECT(assert_equal(numerical, actualDtransform_from1, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, Dtransform_from2) TEST(Pose3, Dtransform_from2) {
{
Matrix actualDtransform_from2; Matrix actualDtransform_from2;
T.transform_from(P, boost::none, actualDtransform_from2); T.transformFrom(P, boost::none, actualDtransform_from2);
Matrix numerical = numericalDerivative22(transform_from_,T,P); Matrix numerical = numericalDerivative22(transformFrom_, T, P);
EXPECT(assert_equal(numerical,actualDtransform_from2,1e-8)); EXPECT(assert_equal(numerical, actualDtransform_from2, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
Point3 transform_to_(const Pose3& pose, const Point3& point) { return pose.transform_to(point); } Point3 transform_to_(const Pose3& pose, const Point3& point) {
TEST( Pose3, Dtransform_to1) return pose.transformTo(point);
{ }
TEST(Pose3, Dtransform_to1) {
Matrix computed; Matrix computed;
T.transform_to(P, computed, boost::none); T.transformTo(P, computed, boost::none);
Matrix numerical = numericalDerivative21(transform_to_,T,P); Matrix numerical = numericalDerivative21(transform_to_, T, P);
EXPECT(assert_equal(numerical,computed,1e-8)); EXPECT(assert_equal(numerical, computed, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, Dtransform_to2) TEST(Pose3, Dtransform_to2) {
{
Matrix computed; Matrix computed;
T.transform_to(P, boost::none, computed); T.transformTo(P, boost::none, computed);
Matrix numerical = numericalDerivative22(transform_to_,T,P); Matrix numerical = numericalDerivative22(transform_to_, T, P);
EXPECT(assert_equal(numerical,computed,1e-8)); EXPECT(assert_equal(numerical, computed, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_to_with_derivatives) TEST(Pose3, transform_to_with_derivatives) {
{
Matrix actH1, actH2; Matrix actH1, actH2;
T.transform_to(P,actH1,actH2); T.transformTo(P, actH1, actH2);
Matrix expH1 = numericalDerivative21(transform_to_, T,P), Matrix expH1 = numericalDerivative21(transform_to_, T, P),
expH2 = numericalDerivative22(transform_to_, T,P); expH2 = numericalDerivative22(transform_to_, T, P);
EXPECT(assert_equal(expH1, actH1, 1e-8)); EXPECT(assert_equal(expH1, actH1, 1e-8));
EXPECT(assert_equal(expH2, actH2, 1e-8)); EXPECT(assert_equal(expH2, actH2, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_from_with_derivatives) TEST(Pose3, transform_from_with_derivatives) {
{
Matrix actH1, actH2; Matrix actH1, actH2;
T.transform_from(P,actH1,actH2); T.transformFrom(P, actH1, actH2);
Matrix expH1 = numericalDerivative21(transform_from_, T,P), Matrix expH1 = numericalDerivative21(transformFrom_, T, P),
expH2 = numericalDerivative22(transform_from_, T,P); expH2 = numericalDerivative22(transformFrom_, T, P);
EXPECT(assert_equal(expH1, actH1, 1e-8)); EXPECT(assert_equal(expH1, actH1, 1e-8));
EXPECT(assert_equal(expH2, actH2, 1e-8)); EXPECT(assert_equal(expH2, actH2, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_to_translate) TEST(Pose3, transform_to_translate) {
{ Point3 actual =
Point3 actual = Pose3(Rot3(), Point3(1, 2, 3)).transform_to(Point3(10.,20.,30.)); Pose3(Rot3(), Point3(1, 2, 3)).transformTo(Point3(10., 20., 30.));
Point3 expected(9.,18.,27.); Point3 expected(9., 18., 27.);
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_to_rotate) TEST(Pose3, transform_to_rotate) {
{ Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(0, 0, 0));
Pose3 transform(Rot3::Rodrigues(0,0,-1.570796), Point3(0,0,0)); Point3 actual = transform.transformTo(Point3(2, 1, 10));
Point3 actual = transform.transform_to(Point3(2,1,10)); Point3 expected(-1, 2, 10);
Point3 expected(-1,2,10);
EXPECT(assert_equal(expected, actual, 0.001)); EXPECT(assert_equal(expected, actual, 0.001));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_to) TEST(Pose3, transformTo) {
{ Pose3 transform(Rot3::Rodrigues(0, 0, -1.570796), Point3(2, 4, 0));
Pose3 transform(Rot3::Rodrigues(0,0,-1.570796), Point3(2,4, 0)); Point3 actual = transform.transformTo(Point3(3, 2, 10));
Point3 actual = transform.transform_to(Point3(3,2,10)); Point3 expected(2, 1, 10);
Point3 expected(2,1,10);
EXPECT(assert_equal(expected, actual, 0.001)); EXPECT(assert_equal(expected, actual, 0.001));
} }
Pose3 transform_pose_to_(const Pose3& pose, const Pose3& pose2) { return pose.transform_pose_to(pose2); } Pose3 transformPoseTo_(const Pose3& pose, const Pose3& pose2) {
return pose.transformPoseTo(pose2);
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_pose_to) TEST(Pose3, transformPoseTo) {
{ Pose3 origin = T.transformPoseTo(T);
Pose3 origin = T.transform_pose_to(T);
EXPECT(assert_equal(Pose3{}, origin)); EXPECT(assert_equal(Pose3{}, origin));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_pose_to_with_derivatives) TEST(Pose3, transformPoseTo_with_derivatives) {
{
Matrix actH1, actH2; Matrix actH1, actH2;
Pose3 res = T.transform_pose_to(T2,actH1,actH2); Pose3 res = T.transformPoseTo(T2, actH1, actH2);
EXPECT(assert_equal(res, T.inverse().compose(T2))); EXPECT(assert_equal(res, T.inverse().compose(T2)));
Matrix expH1 = numericalDerivative21(transform_pose_to_, T, T2), Matrix expH1 = numericalDerivative21(transformPoseTo_, T, T2),
expH2 = numericalDerivative22(transform_pose_to_, T, T2); expH2 = numericalDerivative22(transformPoseTo_, T, T2);
EXPECT(assert_equal(expH1, actH1, 1e-8)); EXPECT(assert_equal(expH1, actH1, 1e-8));
EXPECT(assert_equal(expH2, actH2, 1e-8)); EXPECT(assert_equal(expH2, actH2, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_pose_to_with_derivatives2) TEST(Pose3, transformPoseTo_with_derivatives2) {
{
Matrix actH1, actH2; Matrix actH1, actH2;
Pose3 res = T.transform_pose_to(T3,actH1,actH2); Pose3 res = T.transformPoseTo(T3, actH1, actH2);
EXPECT(assert_equal(res, T.inverse().compose(T3))); EXPECT(assert_equal(res, T.inverse().compose(T3)));
Matrix expH1 = numericalDerivative21(transform_pose_to_, T, T3), Matrix expH1 = numericalDerivative21(transformPoseTo_, T, T3),
expH2 = numericalDerivative22(transform_pose_to_, T, T3); expH2 = numericalDerivative22(transformPoseTo_, T, T3);
EXPECT(assert_equal(expH1, actH1, 1e-8)); EXPECT(assert_equal(expH1, actH1, 1e-8));
EXPECT(assert_equal(expH2, actH2, 1e-8)); EXPECT(assert_equal(expH2, actH2, 1e-8));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_from) TEST(Pose3, transformFrom) {
{ Point3 actual = T3.transformFrom(Point3(0, 0, 0));
Point3 actual = T3.transform_from(Point3(0,0,0)); Point3 expected = Point3(1., 2., 3.);
Point3 expected = Point3(1.,2.,3.);
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Pose3, transform_roundtrip) TEST(Pose3, transform_roundtrip) {
{ Point3 actual = T3.transformFrom(T3.transformTo(Point3(12., -0.11, 7.0)));
Point3 actual = T3.transform_from(T3.transform_to(Point3(12., -0.11,7.0))); Point3 expected(12., -0.11, 7.0);
Point3 expected(12., -0.11,7.0);
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */
TEST( Pose3, transformPose_to_origin)
{
// transform to origin
Pose3 actual = T3.transform_to(Pose3());
EXPECT(assert_equal(T3, actual, 1e-8));
}
/* ************************************************************************* */
TEST( Pose3, transformPose_to_itself)
{
// transform to itself
Pose3 actual = T3.transform_to(T3);
EXPECT(assert_equal(Pose3(), actual, 1e-8));
}
/* ************************************************************************* */
TEST( Pose3, transformPose_to_translation)
{
// transform translation only
Rot3 r = Rot3::Rodrigues(-1.570796,0,0);
Pose3 pose2(r, Point3(21.,32.,13.));
Pose3 actual = pose2.transform_to(Pose3(Rot3(), Point3(1,2,3)));
Pose3 expected(r, Point3(20.,30.,10.));
EXPECT(assert_equal(expected, actual, 1e-8));
}
/* ************************************************************************* */
TEST( Pose3, transformPose_to_simple_rotate)
{
// transform translation only
Rot3 r = Rot3::Rodrigues(0,0,-1.570796);
Pose3 pose2(r, Point3(21.,32.,13.));
Pose3 transform(r, Point3(1,2,3));
Pose3 actual = pose2.transform_to(transform);
Pose3 expected(Rot3(), Point3(-30.,20.,10.));
EXPECT(assert_equal(expected, actual, 0.001));
}
/* ************************************************************************* */
TEST( Pose3, transformPose_to)
{
// transform to
Rot3 r = Rot3::Rodrigues(0,0,-1.570796); //-90 degree yaw
Rot3 r2 = Rot3::Rodrigues(0,0,0.698131701); //40 degree yaw
Pose3 pose2(r2, Point3(21.,32.,13.));
Pose3 transform(r, Point3(1,2,3));
Pose3 actual = pose2.transform_to(transform);
Pose3 expected(Rot3::Rodrigues(0,0,2.26892803), Point3(-30.,20.,10.));
EXPECT(assert_equal(expected, actual, 0.001));
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST(Pose3, Retract_LocalCoordinates) TEST(Pose3, Retract_LocalCoordinates)
{ {
@ -789,9 +727,9 @@ TEST(Pose3, Align2) {
vector<Point3Pair> correspondences; vector<Point3Pair> correspondences;
Point3 p1(0,0,1), p2(10,0,2), p3(20,-10,30); Point3 p1(0,0,1), p2(10,0,2), p3(20,-10,30);
Point3 q1 = expected.transform_from(p1), Point3 q1 = expected.transformFrom(p1),
q2 = expected.transform_from(p2), q2 = expected.transformFrom(p2),
q3 = expected.transform_from(p3); q3 = expected.transformFrom(p3);
Point3Pair ab1(make_pair(q1, p1)); Point3Pair ab1(make_pair(q1, p1));
Point3Pair ab2(make_pair(q2, p2)); Point3Pair ab2(make_pair(q2, p2));
Point3Pair ab3(make_pair(q3, p3)); Point3Pair ab3(make_pair(q3, p3));


@ -103,6 +103,20 @@ Rot3 slow_but_correct_Rodrigues(const Vector& w) {
return Rot3(R); return Rot3(R);
} }
/* ************************************************************************* */
TEST( Rot3, AxisAngle)
{
Vector axis = Vector3(0., 1., 0.); // rotation around Y
double angle = 3.14 / 4.0;
Rot3 expected(0.707388, 0, 0.706825,
0, 1, 0,
-0.706825, 0, 0.707388);
Rot3 actual = Rot3::AxisAngle(axis, angle);
CHECK(assert_equal(expected,actual,1e-5));
Rot3 actual2 = Rot3::AxisAngle(axis, angle-2*M_PI);
CHECK(assert_equal(expected,actual2,1e-5));
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST( Rot3, Rodrigues) TEST( Rot3, Rodrigues)
{ {


@ -82,6 +82,34 @@ TEST(SO3, ChartDerivatives) {
CHECK_CHART_DERIVATIVES(R2, R1); CHECK_CHART_DERIVATIVES(R2, R1);
} }
/* ************************************************************************* */
TEST(SO3, Expmap) {
Vector axis = Vector3(0., 1., 0.); // rotation around Y
double angle = 3.14 / 4.0;
Matrix expected(3,3);
expected << 0.707388, 0, 0.706825, 0, 1, 0, -0.706825, 0, 0.707388;
// axis angle version
so3::ExpmapFunctor f1(axis, angle);
SO3 actual1 = f1.expmap();
CHECK(assert_equal(expected, actual1.matrix(), 1e-5));
// axis angle version, negative angle
so3::ExpmapFunctor f2(axis, angle - 2*M_PI);
SO3 actual2 = f2.expmap();
CHECK(assert_equal(expected, actual2.matrix(), 1e-5));
// omega version
so3::ExpmapFunctor f3(axis * angle);
SO3 actual3 = f3.expmap();
CHECK(assert_equal(expected, actual3.matrix(), 1e-5));
// omega version, negative angle
so3::ExpmapFunctor f4(axis * (angle - 2*M_PI));
SO3 actual4 = f4.expmap();
CHECK(assert_equal(expected, actual4.matrix(), 1e-5));
}
/* ************************************************************************* */ /* ************************************************************************* */
namespace exmap_derivative { namespace exmap_derivative {
static const Vector3 w(0.1, 0.27, -0.2); static const Vector3 w(0.1, 0.27, -0.2);


@ -256,7 +256,7 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras // verify that the triangulated point lies in front of all cameras
for(const Pose3& pose: poses) { for(const Pose3& pose: poses) {
const Point3& p_local = pose.transform_to(point); const Point3& p_local = pose.transformTo(point);
if (p_local.z() <= 0) if (p_local.z() <= 0)
throw(TriangulationCheiralityException()); throw(TriangulationCheiralityException());
} }
@ -304,7 +304,7 @@ Point3 triangulatePoint3(
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras // verify that the triangulated point lies in front of all cameras
for(const CAMERA& camera: cameras) { for(const CAMERA& camera: cameras) {
const Point3& p_local = camera.pose().transform_to(point); const Point3& p_local = camera.pose().transformTo(point);
if (p_local.z() <= 0) if (p_local.z() <= 0)
throw(TriangulationCheiralityException()); throw(TriangulationCheiralityException());
} }
@ -484,7 +484,7 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras // verify that the triangulated point lies in front of all cameras
// Only needed if this was not yet handled by exception // Only needed if this was not yet handled by exception
const Point3& p_local = pose.transform_to(point); const Point3& p_local = pose.transformTo(point);
if (p_local.z() <= 0) if (p_local.z() <= 0)
return TriangulationResult::BehindCamera(); return TriangulationResult::BehindCamera();
#endif #endif
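The guarded loop above is the cheirality test: a candidate landmark is rejected unless its depth is positive in every camera frame. A standalone sketch of the same check (inFrontOfAll is an illustrative helper, not a GTSAM function):

    bool inFrontOfAll(const std::vector<Pose3>& poses, const Point3& point) {
      for (const Pose3& pose : poses)
        if (pose.transformTo(point).z() <= 0) return false;  // behind (or on) the image plane
      return true;
    }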


@ -49,8 +49,7 @@ namespace gtsam {
// Do dense elimination step // Do dense elimination step
KeyVector keyAsVector(1); keyAsVector[0] = key; KeyVector keyAsVector(1); keyAsVector[0] = key;
std::pair<boost::shared_ptr<ConditionalType>, boost::shared_ptr<FactorType> > eliminationResult = auto eliminationResult = function(gatheredFactors, Ordering(keyAsVector));
function(gatheredFactors, Ordering(keyAsVector));
// Add conditional to BayesNet // Add conditional to BayesNet
output->push_back(eliminationResult.first); output->push_back(eliminationResult.first);
@ -190,13 +189,13 @@ namespace gtsam {
{ {
gttic(EliminationTree_eliminate); gttic(EliminationTree_eliminate);
// Allocate result // Allocate result
boost::shared_ptr<BayesNetType> result = boost::make_shared<BayesNetType>(); auto result = boost::make_shared<BayesNetType>();
// Run tree elimination algorithm // Run tree elimination algorithm
FastVector<sharedFactor> remainingFactors = inference::EliminateTree(result, *this, function); FastVector<sharedFactor> remainingFactors = inference::EliminateTree(result, *this, function);
// Add remaining factors that were not involved with eliminated variables // Add remaining factors that were not involved with eliminated variables
boost::shared_ptr<FactorGraphType> allRemainingFactors = boost::make_shared<FactorGraphType>(); auto allRemainingFactors = boost::make_shared<FactorGraphType>();
allRemainingFactors->push_back(remainingFactors_.begin(), remainingFactors_.end()); allRemainingFactors->push_back(remainingFactors_.begin(), remainingFactors_.end());
allRemainingFactors->push_back(remainingFactors.begin(), remainingFactors.end()); allRemainingFactors->push_back(remainingFactors.begin(), remainingFactors.end());


@ -28,6 +28,9 @@
#include <gtsam/inference/Key.h> #include <gtsam/inference/Key.h>
namespace gtsam { namespace gtsam {
/// Define collection types:
typedef FastVector<FactorIndex> FactorIndices;
typedef FastSet<FactorIndex> FactorIndexSet;
/** /**
* This is the base class for all factor types. It is templated on a KEY type, * This is the base class for all factor types. It is templated on a KEY type,


@ -25,7 +25,7 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
template<class FG> template<class FG>
void VariableIndex::augment(const FG& factors, void VariableIndex::augment(const FG& factors,
boost::optional<const FastVector<size_t>&> newFactorIndices) { boost::optional<const FactorIndices&> newFactorIndices) {
gttic(VariableIndex_augment); gttic(VariableIndex_augment);
// Augment index for each factor // Augment index for each factor


@ -35,8 +35,8 @@ void VariableIndex::print(const string& str, const KeyFormatter& keyFormatter) c
cout << "nEntries = " << nEntries() << ", nFactors = " << nFactors() << "\n"; cout << "nEntries = " << nEntries() << ", nFactors = " << nFactors() << "\n";
for(KeyMap::value_type key_factors: index_) { for(KeyMap::value_type key_factors: index_) {
cout << "var " << keyFormatter(key_factors.first) << ":"; cout << "var " << keyFormatter(key_factors.first) << ":";
for(const size_t factor: key_factors.second) for(const auto index: key_factors.second)
cout << " " << factor; cout << " " << index;
cout << "\n"; cout << "\n";
} }
cout.flush(); cout.flush();
@ -48,8 +48,8 @@ void VariableIndex::outputMetisFormat(ostream& os) const {
// run over variables, which will be hyper-edges. // run over variables, which will be hyper-edges.
for(KeyMap::value_type key_factors: index_) { for(KeyMap::value_type key_factors: index_) {
// every variable is a hyper-edge covering its factors // every variable is a hyper-edge covering its factors
for(const size_t factor: key_factors.second) for(const auto index: key_factors.second)
os << (factor+1) << " "; // base 1 os << (index+1) << " "; // base 1
os << "\n"; os << "\n";
} }
os << flush; os << flush;


@ -17,6 +17,7 @@
#pragma once #pragma once
#include <gtsam/inference/Factor.h>
#include <gtsam/inference/Key.h> #include <gtsam/inference/Key.h>
#include <gtsam/base/FastMap.h> #include <gtsam/base/FastMap.h>
#include <gtsam/base/FastVector.h> #include <gtsam/base/FastVector.h>
@ -43,7 +44,7 @@ class GTSAM_EXPORT VariableIndex {
public: public:
typedef boost::shared_ptr<VariableIndex> shared_ptr; typedef boost::shared_ptr<VariableIndex> shared_ptr;
typedef FastVector<size_t> Factors; typedef FactorIndices Factors;
typedef Factors::iterator Factor_iterator; typedef Factors::iterator Factor_iterator;
typedef Factors::const_iterator Factor_const_iterator; typedef Factors::const_iterator Factor_const_iterator;
@ -63,7 +64,7 @@ public:
/// @name Standard Constructors /// @name Standard Constructors
/// @{ /// @{
/** Default constructor, creates an empty VariableIndex */ /// Default constructor, creates an empty VariableIndex
VariableIndex() : nFactors_(0), nEntries_(0) {} VariableIndex() : nFactors_(0), nEntries_(0) {}
/** /**
@ -77,19 +78,16 @@ public:
/// @name Standard Interface /// @name Standard Interface
/// @{ /// @{
/** /// The number of variable entries. This is equal to the number of unique variable Keys.
* The number of variable entries. This is one greater than the variable
* with the highest index.
*/
size_t size() const { return index_.size(); } size_t size() const { return index_.size(); }
/** The number of factors in the original factor graph */ /// The number of factors in the original factor graph
size_t nFactors() const { return nFactors_; } size_t nFactors() const { return nFactors_; }
/** The number of nonzero blocks, i.e. the number of variable-factor entries */ /// The number of nonzero blocks, i.e. the number of variable-factor entries
size_t nEntries() const { return nEntries_; } size_t nEntries() const { return nEntries_; }
/** Access a list of factors by variable */ /// Access a list of factors by variable
const Factors& operator[](Key variable) const { const Factors& operator[](Key variable) const {
KeyMap::const_iterator item = index_.find(variable); KeyMap::const_iterator item = index_.find(variable);
if(item == index_.end()) if(item == index_.end())
@ -102,10 +100,10 @@ public:
/// @name Testable /// @name Testable
/// @{ /// @{
/** Test for equality (for unit tests and debug assertions). */ /// Test for equality (for unit tests and debug assertions).
bool equals(const VariableIndex& other, double tol=0.0) const; bool equals(const VariableIndex& other, double tol=0.0) const;
/** Print the variable index (for unit tests and debugging). */ /// Print the variable index (for unit tests and debugging).
void print(const std::string& str = "VariableIndex: ", void print(const std::string& str = "VariableIndex: ",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const; const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;
@ -125,7 +123,7 @@ public:
* solving problems incrementally. * solving problems incrementally.
*/ */
template<class FG> template<class FG>
void augment(const FG& factors, boost::optional<const FastVector<size_t>&> newFactorIndices = boost::none); void augment(const FG& factors, boost::optional<const FactorIndices&> newFactorIndices = boost::none);
/** /**
* Remove entries corresponding to the specified factors. NOTE: We intentionally do not decrement * Remove entries corresponding to the specified factors. NOTE: We intentionally do not decrement
@ -140,17 +138,17 @@ public:
template<typename ITERATOR, class FG> template<typename ITERATOR, class FG>
void remove(ITERATOR firstFactor, ITERATOR lastFactor, const FG& factors); void remove(ITERATOR firstFactor, ITERATOR lastFactor, const FG& factors);
/** Remove unused empty variables (in debug mode verifies they are empty). */ /// Remove unused empty variables (in debug mode verifies they are empty).
template<typename ITERATOR> template<typename ITERATOR>
void removeUnusedVariables(ITERATOR firstKey, ITERATOR lastKey); void removeUnusedVariables(ITERATOR firstKey, ITERATOR lastKey);
/** Iterator to the first variable entry */ /// Iterator to the first variable entry
const_iterator begin() const { return index_.begin(); } const_iterator begin() const { return index_.begin(); }
/** Iterator to the first variable entry */ /// Iterator to the first variable entry
const_iterator end() const { return index_.end(); } const_iterator end() const { return index_.end(); }
/** Find the iterator for the requested variable entry */ /// Find the iterator for the requested variable entry
const_iterator find(Key key) const { return index_.find(key); } const_iterator find(Key key) const { return index_.find(key); }
protected: protected:
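A hedged sketch of the incremental workflow the augment overload above is meant for, using the new FactorIndices typedef (graph and newFactors are illustrative factor graphs):

    VariableIndex variableIndex(graph);              // index over the existing factors
    FactorIndices newIndices;
    for (size_t i = 0; i < newFactors.size(); ++i)
      newIndices.push_back(graph.size() + i);        // slots the appended factors will occupy
    variableIndex.augment(newFactors, newIndices);   // update in place, iSAM2-style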


@ -138,23 +138,34 @@ namespace gtsam {
//} //}
/* ************************************************************************* */ /* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix() const { Ordering GaussianBayesNet::ordering() const {
GaussianFactorGraph factorGraph(*this); GaussianFactorGraph factorGraph(*this);
KeySet keys = factorGraph.keys(); auto keys = factorGraph.keys();
// add frontal keys in order // add frontal keys in order
Ordering ordering; Ordering ordering;
for (const sharedConditional& cg: *this) for (const sharedConditional& cg : *this)
if (cg) { if (cg) {
for (Key key: cg->frontals()) { for (Key key : cg->frontals()) {
ordering.push_back(key); ordering.push_back(key);
keys.erase(key); keys.erase(key);
} }
} }
// add remaining keys in case Bayes net is incomplete // add remaining keys in case Bayes net is incomplete
for (Key key: keys) for (Key key : keys) ordering.push_back(key);
ordering.push_back(key); return ordering;
// return matrix and RHS }
/* ************************************************************************* */
pair<Matrix, Vector> GaussianBayesNet::matrix(boost::optional<const Ordering&> ordering) const {
if (ordering) {
// Convert to a GaussianFactorGraph and use its machinery
GaussianFactorGraph factorGraph(*this);
return factorGraph.jacobian(ordering); return factorGraph.jacobian(ordering);
} else {
// recursively call with default ordering
const auto defaultOrdering = this->ordering();
return matrix(defaultOrdering);
}
} }
///* ************************************************************************* */ ///* ************************************************************************* */
@ -181,11 +192,11 @@ namespace gtsam {
double logDet = 0.0; double logDet = 0.0;
for(const sharedConditional& cg: *this) { for(const sharedConditional& cg: *this) {
if(cg->get_model()) { if(cg->get_model()) {
Vector diag = cg->get_R().diagonal(); Vector diag = cg->R().diagonal();
cg->get_model()->whitenInPlace(diag); cg->get_model()->whitenInPlace(diag);
logDet += diag.unaryExpr(ptr_fun<double,double>(log)).sum(); logDet += diag.unaryExpr(ptr_fun<double,double>(log)).sum();
} else { } else {
logDet += cg->get_R().diagonal().unaryExpr(ptr_fun<double,double>(log)).sum(); logDet += cg->R().diagonal().unaryExpr(ptr_fun<double,double>(log)).sum();
} }
} }
return logDet; return logDet;


@ -74,6 +74,14 @@ namespace gtsam {
/// Version of optimize for incomplete BayesNet, needs solution for missing variables /// Version of optimize for incomplete BayesNet, needs solution for missing variables
VectorValues optimize(const VectorValues& solutionForMissing) const; VectorValues optimize(const VectorValues& solutionForMissing) const;
/**
* Return ordering corresponding to a topological sort.
* There are many topological sorts of a Bayes net. This one
* corresponds to the one that makes 'matrix' below upper-triangular.
* In case the Bayes net is incomplete, any non-frontal keys are added to the end.
*/
Ordering ordering() const;
///@} ///@}
///@name Linear Algebra ///@name Linear Algebra
@ -81,8 +89,10 @@ namespace gtsam {
/** /**
* Return (dense) upper-triangular matrix representation * Return (dense) upper-triangular matrix representation
* Returns an upper-triangular matrix only when using the 'ordering' above.
* In case the Bayes net is incomplete, zero columns are added to the end.
*/ */
std::pair<Matrix, Vector> matrix() const; std::pair<Matrix, Vector> matrix(boost::optional<const Ordering&> ordering = boost::none) const;
/** /**
* Optimize along the gradient direction, with a closed-form computation to perform the line * Optimize along the gradient direction, with a closed-form computation to perform the line

View File

@ -43,7 +43,7 @@ double logDeterminant(const typename BAYESTREE::sharedClique& clique) {
double result = 0.0; double result = 0.0;
// this clique // this clique
result += clique->conditional()->get_R().diagonal().unaryExpr(std::ptr_fun<double,double>(log)).sum(); result += clique->conditional()->R().diagonal().unaryExpr(std::ptr_fun<double,double>(log)).sum();
// sum of children // sum of children
for(const typename BAYESTREE::sharedClique& child: clique->children_) for(const typename BAYESTREE::sharedClique& child: clique->children_)

View File

@ -37,7 +37,7 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
double logDeterminant(const GaussianBayesTreeClique::shared_ptr& clique, double& parentSum) double logDeterminant(const GaussianBayesTreeClique::shared_ptr& clique, double& parentSum)
{ {
parentSum += clique->conditional()->get_R().diagonal().unaryExpr(std::ptr_fun<double,double>(log)).sum(); parentSum += clique->conditional()->R().diagonal().unaryExpr(std::ptr_fun<double,double>(log)).sum();
assert(false); assert(false);
return 0; return 0;
} }

View File

@ -62,7 +62,7 @@ namespace gtsam {
cout << (boost::format("[%1%]")%(formatter(*it))).str() << " "; cout << (boost::format("[%1%]")%(formatter(*it))).str() << " ";
} }
cout << endl; cout << endl;
cout << formatMatrixIndented(" R = ", get_R()) << endl; cout << formatMatrixIndented(" R = ", R()) << endl;
for (const_iterator it = beginParents() ; it != endParents() ; ++it) { for (const_iterator it = beginParents() ; it != endParents() ; ++it) {
cout << formatMatrixIndented((boost::format(" S[%1%] = ")%(formatter(*it))).str(), getA(it)) cout << formatMatrixIndented((boost::format(" S[%1%] = ")%(formatter(*it))).str(), getA(it))
<< endl; << endl;
@ -84,8 +84,8 @@ namespace gtsam {
// check if R_ and d_ are linear independent // check if R_ and d_ are linear independent
for (DenseIndex i = 0; i < Ab_.rows(); i++) { for (DenseIndex i = 0; i < Ab_.rows(); i++) {
list<Vector> rows1, rows2; list<Vector> rows1, rows2;
rows1.push_back(Vector(get_R().row(i))); rows1.push_back(Vector(R().row(i)));
rows2.push_back(Vector(c->get_R().row(i))); rows2.push_back(Vector(c->R().row(i)));
// check if the matrices are the same // check if the matrices are the same
// iterate over the parents_ map // iterate over the parents_ map
@ -120,10 +120,10 @@ namespace gtsam {
const Vector xS = x.vector(KeyVector(beginParents(), endParents())); const Vector xS = x.vector(KeyVector(beginParents(), endParents()));
// Update right-hand-side // Update right-hand-side
const Vector rhs = get_d() - get_S() * xS; const Vector rhs = d() - S() * xS;
// Solve matrix // Solve matrix
const Vector solution = get_R().triangularView<Eigen::Upper>().solve(rhs); const Vector solution = R().triangularView<Eigen::Upper>().solve(rhs);
// Check for indeterminant solution // Check for indeterminant solution
if (solution.hasNaN()) { if (solution.hasNaN()) {
@ -134,7 +134,7 @@ namespace gtsam {
VectorValues result; VectorValues result;
DenseIndex vectorPosition = 0; DenseIndex vectorPosition = 0;
for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) { for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
result.insert(*frontal, solution.segment(vectorPosition, getDim(frontal))); result.emplace(*frontal, solution.segment(vectorPosition, getDim(frontal)));
vectorPosition += getDim(frontal); vectorPosition += getDim(frontal);
} }
@ -149,10 +149,10 @@ namespace gtsam {
// Instead of updating getb(), update the right-hand-side from the given rhs // Instead of updating getb(), update the right-hand-side from the given rhs
const Vector rhsR = rhs.vector(KeyVector(beginFrontals(), endFrontals())); const Vector rhsR = rhs.vector(KeyVector(beginFrontals(), endFrontals()));
xS = rhsR - get_S() * xS; xS = rhsR - S() * xS;
// Solve Matrix // Solve Matrix
Vector soln = get_R().triangularView<Eigen::Upper>().solve(xS); Vector soln = R().triangularView<Eigen::Upper>().solve(xS);
// Scale by sigmas // Scale by sigmas
if (model_) if (model_)
@ -162,7 +162,7 @@ namespace gtsam {
VectorValues result; VectorValues result;
DenseIndex vectorPosition = 0; DenseIndex vectorPosition = 0;
for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) { for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
result.insert(*frontal, soln.segment(vectorPosition, getDim(frontal))); result.emplace(*frontal, soln.segment(vectorPosition, getDim(frontal)));
vectorPosition += getDim(frontal); vectorPosition += getDim(frontal);
} }
@ -172,13 +172,13 @@ namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const { void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const {
Vector frontalVec = gy.vector(KeyVector(beginFrontals(), endFrontals())); Vector frontalVec = gy.vector(KeyVector(beginFrontals(), endFrontals()));
frontalVec = gtsam::backSubstituteUpper(frontalVec, Matrix(get_R())); frontalVec = R().transpose().triangularView<Eigen::Lower>().solve(frontalVec);
// Check for indeterminant solution // Check for indeterminant solution
if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front()); if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front());
for (const_iterator it = beginParents(); it!= endParents(); it++) for (const_iterator it = beginParents(); it!= endParents(); it++)
gy[*it] += -1.0 * Matrix(getA(it)).transpose() * frontalVec; gy[*it].noalias() += -1.0 * getA(it).transpose() * frontalVec;
// Scale by sigmas // Scale by sigmas
if (model_) if (model_)

View File

@ -94,16 +94,16 @@ namespace gtsam {
bool equals(const GaussianFactor&cg, double tol = 1e-9) const; bool equals(const GaussianFactor&cg, double tol = 1e-9) const;
/** Return a view of the upper-triangular R block of the conditional */ /** Return a view of the upper-triangular R block of the conditional */
constABlock get_R() const { return Ab_.range(0, nrFrontals()); } constABlock R() const { return Ab_.range(0, nrFrontals()); }
/** Get a view of the parent blocks. */ /** Get a view of the parent blocks. */
constABlock get_S() const { return Ab_.range(nrFrontals(), size()); } constABlock S() const { return Ab_.range(nrFrontals(), size()); }
/** Get a view of the S matrix for the variable pointed to by the given key iterator */ /** Get a view of the S matrix for the variable pointed to by the given key iterator */
constABlock get_S(const_iterator variable) const { return BaseFactor::getA(variable); } constABlock S(const_iterator it) const { return BaseFactor::getA(it); }
/** Get a view of the r.h.s. vector d */ /** Get a view of the r.h.s. vector d */
const constBVector get_d() const { return BaseFactor::getb(); } const constBVector d() const { return BaseFactor::getb(); }
/** /**
* Solves a conditional Gaussian and writes the solution into the entries of * Solves a conditional Gaussian and writes the solution into the entries of
@ -130,8 +130,17 @@ namespace gtsam {
void scaleFrontalsBySigma(VectorValues& gy) const; void scaleFrontalsBySigma(VectorValues& gy) const;
// __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; // FIXME: deprecated flag doesn't appear to exist? // __declspec(deprecated) void scaleFrontalsBySigma(VectorValues& gy) const; // FIXME: deprecated flag doesn't appear to exist?
private: #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
constABlock get_R() const { return R(); }
constABlock get_S() const { return S(); }
constABlock get_S(const_iterator it) const { return S(it); }
const constBVector get_d() const { return d(); }
/// @}
#endif
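A short sketch of the rename, assuming GTSAM_ALLOW_DEPRECATED_SINCE_V4 is not needed; the printing helper is illustrative, and new code simply switches to the shorter accessors:

#include <gtsam/linear/GaussianConditional.h>
#include <iostream>

void printRd(const gtsam::GaussianConditional& conditional) {
  const gtsam::Matrix R(conditional.R());  // previously conditional.get_R()
  const gtsam::Vector d(conditional.d());  // previously conditional.get_d()
  std::cout << "R =\n" << R << "\nd = " << d.transpose() << std::endl;
}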
private:
/** Serialization function */ /** Serialization function */
friend class boost::serialization::access; friend class boost::serialization::access;
template<class Archive> template<class Archive>

View File

@ -35,8 +35,8 @@ namespace gtsam {
for(const_iterator it = beginFrontals(); it != endFrontals(); ++it) for(const_iterator it = beginFrontals(); it != endFrontals(); ++it)
cout << (boost::format("[%1%]")%(formatter(*it))).str() << " "; cout << (boost::format("[%1%]")%(formatter(*it))).str() << " ";
cout << endl; cout << endl;
gtsam::print(Matrix(get_R()), "R: "); gtsam::print(Matrix(R()), "R: ");
gtsam::print(Vector(get_d()), "d: "); gtsam::print(Vector(d()), "d: ");
if(model_) if(model_)
model_->print("Noise model: "); model_->print("Noise model: ");
} }

View File

@ -61,7 +61,7 @@ namespace gtsam {
for (GaussianFactor::const_iterator it = gf->begin(); it != gf->end(); it++) { for (GaussianFactor::const_iterator it = gf->begin(); it != gf->end(); it++) {
map<Key,size_t>::iterator it2 = spec.find(*it); map<Key,size_t>::iterator it2 = spec.find(*it);
if ( it2 == spec.end() ) { if ( it2 == spec.end() ) {
spec.insert(make_pair(*it, gf->getDim(it))); spec.emplace(*it, gf->getDim(it));
} }
} }
} }
@ -246,7 +246,7 @@ namespace gtsam {
if (blocks.count(j)) if (blocks.count(j))
blocks[j] += Bj; blocks[j] += Bj;
else else
blocks.insert(make_pair(j,Bj)); blocks.emplace(j,Bj);
} }
} }
return blocks; return blocks;

View File

@ -305,7 +305,7 @@ Matrix HessianFactor::information() const {
VectorValues HessianFactor::hessianDiagonal() const { VectorValues HessianFactor::hessianDiagonal() const {
VectorValues d; VectorValues d;
for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) { for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
d.insert(keys_[j], info_.diagonal(j)); d.emplace(keys_[j], info_.diagonal(j));
} }
return d; return d;
} }
@ -436,7 +436,7 @@ VectorValues HessianFactor::gradientAtZero() const {
VectorValues g; VectorValues g;
size_t n = size(); size_t n = size();
for (size_t j = 0; j < n; ++j) for (size_t j = 0; j < n; ++j)
g.insert(keys_[j], -info_.aboveDiagonalBlock(j, n)); g.emplace(keys_[j], -info_.aboveDiagonalBlock(j, n));
return g; return g;
} }
@ -487,6 +487,11 @@ boost::shared_ptr<GaussianConditional> HessianFactor::eliminateCholesky(const Or
// Erase the eliminated keys in this factor // Erase the eliminated keys in this factor
keys_.erase(begin(), begin() + nFrontals); keys_.erase(begin(), begin() + nFrontals);
} catch (const CholeskyFailed&) { } catch (const CholeskyFailed&) {
#ifndef NDEBUG
cout << "Partial Cholesky on HessianFactor failed." << endl;
keys.print("Frontal keys ");
print("HessianFactor:");
#endif
throw IndeterminantLinearSystemException(keys.front()); throw IndeterminantLinearSystemException(keys.front());
} }
@ -513,7 +518,7 @@ VectorValues HessianFactor::solve() {
std::size_t offset = 0; std::size_t offset = 0;
for (DenseIndex j = 0; j < (DenseIndex)n; ++j) { for (DenseIndex j = 0; j < (DenseIndex)n; ++j) {
const DenseIndex dim = info_.getDim(j); const DenseIndex dim = info_.getDim(j);
delta.insert(keys_[j], solution.segment(offset, dim)); delta.emplace(keys_[j], solution.segment(offset, dim));
offset += dim; offset += dim;
} }

View File

@ -117,7 +117,7 @@ void KeyInfo::initialize(const GaussianFactorGraph &fg) {
for (size_t i = 0; i < n; ++i) { for (size_t i = 0; i < n; ++i) {
const size_t key = ordering_[i]; const size_t key = ordering_[i];
const size_t dim = colspec.find(key)->second; const size_t dim = colspec.find(key)->second;
insert(make_pair(key, KeyInfoEntry(i, dim, start))); this->emplace(key, KeyInfoEntry(i, dim, start));
start += dim; start += dim;
} }
numCols_ = start; numCols_ = start;
@ -126,8 +126,8 @@ void KeyInfo::initialize(const GaussianFactorGraph &fg) {
/****************************************************************************/ /****************************************************************************/
vector<size_t> KeyInfo::colSpec() const { vector<size_t> KeyInfo::colSpec() const {
std::vector<size_t> result(size(), 0); std::vector<size_t> result(size(), 0);
for ( const KeyInfo::value_type &item: *this ) { for ( const auto &item: *this ) {
result[item.second.index()] = item.second.dim(); result[item.second.index] = item.second.dim;
} }
return result; return result;
} }
@ -135,8 +135,8 @@ vector<size_t> KeyInfo::colSpec() const {
/****************************************************************************/ /****************************************************************************/
VectorValues KeyInfo::x0() const { VectorValues KeyInfo::x0() const {
VectorValues result; VectorValues result;
for ( const KeyInfo::value_type &item: *this ) { for ( const auto &item: *this ) {
result.insert(item.first, Vector::Zero(item.second.dim())); result.emplace(item.first, Vector::Zero(item.second.dim));
} }
return result; return result;
} }

View File

@ -32,8 +32,8 @@
namespace gtsam { namespace gtsam {
// Forward declarations // Forward declarations
struct KeyInfoEntry;
class KeyInfo; class KeyInfo;
class KeyInfoEntry;
class GaussianFactorGraph; class GaussianFactorGraph;
class Values; class Values;
class VectorValues; class VectorValues;
@ -109,27 +109,14 @@ public:
/** /**
* Handy data structure for iterative solvers * Handy data structure for iterative solvers
* key to (index, dimension, colstart) * key to (index, dimension, start)
*/ */
class GTSAM_EXPORT KeyInfoEntry: public boost::tuple<Key, size_t, Key> { struct GTSAM_EXPORT KeyInfoEntry {
size_t index, dim, start;
public:
typedef boost::tuple<Key, size_t, Key> Base;
KeyInfoEntry() { KeyInfoEntry() {
} }
KeyInfoEntry(size_t idx, size_t d, Key start) : KeyInfoEntry(size_t idx, size_t d, Key start) :
Base(idx, d, start) { index(idx), dim(d), start(start) {
}
size_t index() const {
return this->get<0>();
}
size_t dim() const {
return this->get<1>();
}
size_t colstart() const {
return this->get<2>();
} }
}; };
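A small sketch of how call sites change now that KeyInfoEntry exposes plain fields; the include path and the helper are assumptions based on where KeyInfo is declared:

#include <gtsam/linear/IterativeSolver.h>

size_t totalDimension(const gtsam::KeyInfo& keyInfo) {
  size_t total = 0;
  for (const auto& item : keyInfo)  // item.second is a KeyInfoEntry
    total += item.second.dim;       // field access replaces the old item.second.dim() getter
  return total;
}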

View File

@ -102,10 +102,9 @@ JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
} }
/* ************************************************************************* */ /* ************************************************************************* */
JacobianFactor::JacobianFactor(const HessianFactor& factor) : JacobianFactor::JacobianFactor(const HessianFactor& factor)
Base(factor), Ab_( : Base(factor),
VerticalBlockMatrix::LikeActiveViewOf(factor.info(), Ab_(VerticalBlockMatrix::LikeActiveViewOf(factor.info(), factor.rows())) {
factor.rows())) {
// Copy Hessian into our matrix and then do in-place Cholesky // Copy Hessian into our matrix and then do in-place Cholesky
Ab_.full() = factor.info().selfadjointView(); Ab_.full() = factor.info().selfadjointView();
@ -114,16 +113,19 @@ JacobianFactor::JacobianFactor(const HessianFactor& factor) :
bool success; bool success;
boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix()); boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix());
// Check for indefinite system // Check that Cholesky succeeded OR it managed to factor the full Hessian.
if (!success) // The latter case occurs with non-positive definite matrices arising from QP.
throw IndeterminantLinearSystemException(factor.keys().front()); if (success || maxrank == factor.rows() - 1) {
// Zero out lower triangle // Zero out lower triangle
Ab_.matrix().topRows(maxrank).triangularView<Eigen::StrictlyLower>() = Ab_.matrix().topRows(maxrank).triangularView<Eigen::StrictlyLower>() =
Matrix::Zero(maxrank, Ab_.matrix().cols()); Matrix::Zero(maxrank, Ab_.matrix().cols());
// FIXME: replace with triangular system // FIXME: replace with triangular system
Ab_.rowEnd() = maxrank; Ab_.rowEnd() = maxrank;
model_ = SharedDiagonal(); // should be same as Unit::Create(maxrank); model_ = SharedDiagonal(); // is equivalent to Unit::Create(maxrank)
} else {
// indefinite system
throw IndeterminantLinearSystemException(factor.keys().front());
}
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -164,9 +166,10 @@ boost::tuple<FastVector<DenseIndex>, DenseIndex, DenseIndex> _countDims(
n += vardim; n += vardim;
} else { } else {
if(!(varDims[jointVarpos] == vardim)) { if(!(varDims[jointVarpos] == vardim)) {
cout << "Factor " << sourceFactorI << " variable " << DefaultKeyFormatter(sourceFactor.keys()[sourceVarpos]) << std::stringstream ss;
" has different dimensionality of " << vardim << " instead of " << varDims[jointVarpos] << endl; ss << "Factor " << sourceFactorI << " variable " << DefaultKeyFormatter(sourceFactor.keys()[sourceVarpos]) <<
exit(1); " has different dimensionality of " << vardim << " instead of " << varDims[jointVarpos];
throw std::runtime_error(ss.str());
} }
} }
#else #else
@ -429,10 +432,9 @@ Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
/* ************************************************************************* */ /* ************************************************************************* */
Vector JacobianFactor::error_vector(const VectorValues& c) const { Vector JacobianFactor::error_vector(const VectorValues& c) const {
if (model_) Vector e = unweighted_error(c);
return model_->whiten(unweighted_error(c)); if (model_) model_->whitenInPlace(e);
else return e;
return unweighted_error(c);
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -473,10 +475,10 @@ VectorValues JacobianFactor::hessianDiagonal() const {
for (size_t k = 0; k < nj; ++k) { for (size_t k = 0; k < nj; ++k) {
Vector column_k = Ab_(pos).col(k); Vector column_k = Ab_(pos).col(k);
if (model_) if (model_)
column_k = model_->whiten(column_k); model_->whitenInPlace(column_k);
dj(k) = dot(column_k, column_k); dj(k) = dot(column_k, column_k);
} }
d.insert(j, dj); d.emplace(j, dj);
} }
return d; return d;
} }
@ -495,7 +497,7 @@ map<Key, Matrix> JacobianFactor::hessianBlockDiagonal() const {
Matrix Aj = Ab_(pos); Matrix Aj = Ab_(pos);
if (model_) if (model_)
Aj = model_->Whiten(Aj); Aj = model_->Whiten(Aj);
blocks.insert(make_pair(j, Aj.transpose() * Aj)); blocks.emplace(j, Aj.transpose() * Aj);
} }
return blocks; return blocks;
} }
@ -540,29 +542,38 @@ void JacobianFactor::updateHessian(const KeyVector& infoKeys,
/* ************************************************************************* */ /* ************************************************************************* */
Vector JacobianFactor::operator*(const VectorValues& x) const { Vector JacobianFactor::operator*(const VectorValues& x) const {
Vector Ax = Vector::Zero(Ab_.rows()); Vector Ax(Ab_.rows());
Ax.setZero();
if (empty()) if (empty())
return Ax; return Ax;
// Just iterate over all A matrices and multiply in correct config part // Just iterate over all A matrices and multiply in correct config part
for (size_t pos = 0; pos < size(); ++pos) for (size_t pos = 0; pos < size(); ++pos) {
Ax += Ab_(pos) * x[keys_[pos]]; // http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
Ax.noalias() += Ab_(pos) * x[keys_[pos]];
}
return model_ ? model_->whiten(Ax) : Ax; if (model_) model_->whitenInPlace(Ax);
return Ax;
} }
/* ************************************************************************* */ /* ************************************************************************* */
void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e, void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
VectorValues& x) const { VectorValues& x) const {
Vector E = alpha * (model_ ? model_->whiten(e) : e); Vector E(e.size());
E.noalias() = alpha * e;
if (model_) model_->whitenInPlace(E);
// Just iterate over all A matrices and insert Ai^e into VectorValues // Just iterate over all A matrices and insert Ai^e into VectorValues
for (size_t pos = 0; pos < size(); ++pos) { for (size_t pos = 0; pos < size(); ++pos) {
Key j = keys_[pos]; const Key j = keys_[pos];
// Create the value as a zero vector if it does not exist. // To avoid another malloc if key exists, we explicitly check
pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector()); auto it = x.find(j);
if (xi.second) if (it != x.end()) {
xi.first->second = Vector::Zero(getDim(begin() + pos)); // http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
xi.first->second += Ab_(pos).transpose()*E; it->second.noalias() += Ab_(pos).transpose() * E;
} else {
x.emplace(j, Ab_(pos).transpose() * E);
}
} }
} }
@ -624,8 +635,8 @@ VectorValues JacobianFactor::gradientAtZero() const {
Vector b = getb(); Vector b = getb();
// Gradient is really -A'*b / sigma^2 // Gradient is really -A'*b / sigma^2
// transposeMultiplyAdd will divide by sigma once, so we need one more // transposeMultiplyAdd will divide by sigma once, so we need one more
Vector b_sigma = model_ ? model_->whiten(b) : b; if (model_) model_->whitenInPlace(b);
this->transposeMultiplyAdd(-1.0, b_sigma, g); // g -= A'*b/sigma^2 this->transposeMultiplyAdd(-1.0, b, g); // g -= A'*b/sigma^2
return g; return g;
} }
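For reference, the identity behind the two whitening steps above, restated rather than new behaviour: the factor cost is f(x) = 1/2 ||(Ax - b)/sigma||^2, so grad f(0) = -A' b / sigma^2; whitenInPlace divides b by sigma once, and transposeMultiplyAdd whitens the residual again, supplying the second factor of 1/sigma.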

View File

@ -89,7 +89,7 @@ void GaussianFactorGraphSystem::multiply(const Vector &x, Vector& AtAx) const {
VectorValues vvX = buildVectorValues(x, keyInfo_); VectorValues vvX = buildVectorValues(x, keyInfo_);
// VectorValues form of A'Ax for multiplyHessianAdd // VectorValues form of A'Ax for multiplyHessianAdd
VectorValues vvAtAx; VectorValues vvAtAx = keyInfo_.x0(); // crucial for performance
// vvAtAx += 1.0 * A'Ax for each factor // vvAtAx += 1.0 * A'Ax for each factor
gfg_.multiplyHessianAdd(1.0, vvX, vvAtAx); gfg_.multiplyHessianAdd(1.0, vvX, vvAtAx);
@ -132,14 +132,14 @@ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering,
DenseIndex offset = 0; DenseIndex offset = 0;
for (size_t i = 0; i < ordering.size(); ++i) { for (size_t i = 0; i < ordering.size(); ++i) {
const Key &key = ordering[i]; const Key key = ordering[i];
map<Key, size_t>::const_iterator it = dimensions.find(key); map<Key, size_t>::const_iterator it = dimensions.find(key);
if (it == dimensions.end()) { if (it == dimensions.end()) {
throw invalid_argument( throw invalid_argument(
"buildVectorValues: inconsistent ordering and dimensions"); "buildVectorValues: inconsistent ordering and dimensions");
} }
const size_t dim = it->second; const size_t dim = it->second;
result.insert(key, v.segment(offset, dim)); result.emplace(key, v.segment(offset, dim));
offset += dim; offset += dim;
} }
@ -150,8 +150,7 @@ VectorValues buildVectorValues(const Vector &v, const Ordering &ordering,
VectorValues buildVectorValues(const Vector &v, const KeyInfo &keyInfo) { VectorValues buildVectorValues(const Vector &v, const KeyInfo &keyInfo) {
VectorValues result; VectorValues result;
for ( const KeyInfo::value_type &item: keyInfo ) { for ( const KeyInfo::value_type &item: keyInfo ) {
result.insert(item.first, result.emplace(item.first, v.segment(item.second.start, item.second.dim));
v.segment(item.second.colstart(), item.second.dim()));
} }
return result; return result;
} }

View File

@ -70,21 +70,20 @@ public:
Preconditioner() {} Preconditioner() {}
virtual ~Preconditioner() {} virtual ~Preconditioner() {}
/* Computation Interfaces */ /*
* Abstract interface for raw vectors. VectorValues is a speed bottleneck
* and Yong-Dian has profiled preconditioners (outside GTSAM) with the
* three methods below. In GTSAM, unfortunately, we are still using the
* VectorValues methods called in iterative-inl.h
*/
/* implement x = L^{-1} y */ /// implement x = L^{-1} y
virtual void solve(const Vector& y, Vector &x) const = 0; virtual void solve(const Vector& y, Vector &x) const = 0;
// virtual void solve(const VectorValues& y, VectorValues &x) const = 0;
/* implement x = L^{-T} y */ /// implement x = L^{-T} y
virtual void transposeSolve(const Vector& y, Vector& x) const = 0; virtual void transposeSolve(const Vector& y, Vector& x) const = 0;
// virtual void transposeSolve(const VectorValues& y, VectorValues &x) const = 0;
// /* implement x = L^{-1} L^{-T} y */ /// build/factorize the preconditioner
// virtual void fullSolve(const Vector& y, Vector &x) const = 0;
// virtual void fullSolve(const VectorValues& y, VectorValues &x) const = 0;
/* build/factorize the preconditioner */
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
@ -113,14 +112,7 @@ public:
/* Computation Interfaces for raw vector */ /* Computation Interfaces for raw vector */
virtual void solve(const Vector& y, Vector &x) const { x = y; } virtual void solve(const Vector& y, Vector &x) const { x = y; }
// virtual void solve(const VectorValues& y, VectorValues& x) const { x = y; }
virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; } virtual void transposeSolve(const Vector& y, Vector& x) const { x = y; }
// virtual void transposeSolve(const VectorValues& y, VectorValues& x) const { x = y; }
// virtual void fullSolve(const Vector& y, Vector &x) const { x = y; }
// virtual void fullSolve(const VectorValues& y, VectorValues& x) const { x = y; }
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
@ -145,8 +137,6 @@ public:
/* Computation Interfaces for raw vector */ /* Computation Interfaces for raw vector */
virtual void solve(const Vector& y, Vector &x) const; virtual void solve(const Vector& y, Vector &x) const;
virtual void transposeSolve(const Vector& y, Vector& x) const ; virtual void transposeSolve(const Vector& y, Vector& x) const ;
// virtual void fullSolve(const Vector& y, Vector &x) const ;
virtual void build( virtual void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
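A hedged sketch of the raw-vector call pattern described above; the helper name, and the assumption that x and y share the same dimension, are illustrative:

#include <gtsam/linear/Preconditioner.h>

gtsam::Vector applyPreconditioner(const gtsam::Preconditioner& preconditioner,
                                  const gtsam::Vector& y) {
  gtsam::Vector x(y.size());
  preconditioner.solve(y, x);           // x = L^{-1} y
  gtsam::Vector z(x.size());
  preconditioner.transposeSolve(x, z);  // z = L^{-T} x, i.e. z = (L L')^{-1} y
  return z;
}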

View File

@ -0,0 +1,545 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SubgraphBuilder.cpp
* @date Dec 31, 2009
* @author Frank Dellaert, Yong-Dian Jian
*/
#include <gtsam/base/DSFVector.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/inference/VariableIndex.h>
#include <gtsam/linear/Errors.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SubgraphBuilder.h>
#include <boost/algorithm/string.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <numeric> // accumulate
#include <queue>
#include <set>
#include <stdexcept>
#include <vector>
using std::cout;
using std::endl;
using std::ostream;
using std::vector;
namespace gtsam {
/*****************************************************************************/
/* sort the container and return permutation index with default comparator */
template <typename Container>
static vector<size_t> sort_idx(const Container &src) {
typedef typename Container::value_type T;
const size_t n = src.size();
vector<std::pair<size_t, T> > tmp;
tmp.reserve(n);
for (size_t i = 0; i < n; i++) tmp.emplace_back(i, src[i]);
/* sort */
std::stable_sort(tmp.begin(), tmp.end());
/* copy back */
vector<size_t> idx;
idx.reserve(n);
for (size_t i = 0; i < n; i++) {
idx.push_back(tmp[i].first);
}
return idx;
}
/*****************************************************************************/
static vector<size_t> iidSampler(const vector<double> &weight, const size_t n) {
/* compute the sum of the weights */
const double sum = std::accumulate(weight.begin(), weight.end(), 0.0);
if (sum == 0.0) {
throw std::runtime_error("Weight vector has no non-zero weights");
}
/* make a normalized and accumulated version of the weight vector */
const size_t m = weight.size();
vector<double> cdf;
cdf.reserve(m);
for (size_t i = 0; i < m; ++i) {
cdf.push_back(weight[i] / sum);
}
vector<double> acc(m);
std::partial_sum(cdf.begin(), cdf.end(), acc.begin());
/* iid sample n times */
vector<size_t> samples;
samples.reserve(n);
const double denominator = (double)RAND_MAX;
for (size_t i = 0; i < n; ++i) {
const double value = rand() / denominator;
/* binary search the interval containing "value" */
const auto it = std::lower_bound(acc.begin(), acc.end(), value);
const size_t index = it - acc.begin();
samples.push_back(index);
}
return samples;
}
/*****************************************************************************/
static vector<size_t> UniqueSampler(const vector<double> &weight,
const size_t n) {
const size_t m = weight.size();
if (n > m) throw std::invalid_argument("UniqueSampler: invalid input size");
vector<size_t> samples;
size_t count = 0;
vector<bool> touched(m, false);
while (count < n) {
vector<size_t> localIndices;
localIndices.reserve(n - count);
vector<double> localWeights;
localWeights.reserve(n - count);
/* collect data */
for (size_t i = 0; i < m; ++i) {
if (!touched[i]) {
localIndices.push_back(i);
localWeights.push_back(weight[i]);
}
}
/* sampling and cache results */
vector<size_t> localSamples = iidSampler(localWeights, n - count);
for (const size_t &index : localSamples) {
if (touched[index] == false) {
touched[index] = true;
samples.push_back(index);
if (++count >= n) break;
}
}
}
return samples;
}
/****************************************************************************/
Subgraph::Subgraph(const vector<size_t> &indices) {
edges_.reserve(indices.size());
for (const size_t &index : indices) {
const Edge e {index,1.0};
edges_.push_back(e);
}
}
/****************************************************************************/
vector<size_t> Subgraph::edgeIndices() const {
vector<size_t> eid;
eid.reserve(size());
for (const Edge &edge : edges_) {
eid.push_back(edge.index);
}
return eid;
}
/****************************************************************************/
void Subgraph::save(const std::string &fn) const {
std::ofstream os(fn.c_str());
boost::archive::text_oarchive oa(os);
oa << *this;
os.close();
}
/****************************************************************************/
Subgraph Subgraph::load(const std::string &fn) {
std::ifstream is(fn.c_str());
boost::archive::text_iarchive ia(is);
Subgraph subgraph;
ia >> subgraph;
is.close();
return subgraph;
}
/****************************************************************************/
ostream &operator<<(ostream &os, const Subgraph::Edge &edge) {
if (edge.weight != 1.0)
os << edge.index << "(" << std::setprecision(2) << edge.weight << ")";
else
os << edge.index;
return os;
}
/****************************************************************************/
ostream &operator<<(ostream &os, const Subgraph &subgraph) {
os << "Subgraph" << endl;
for (const auto &e : subgraph.edges()) {
os << e << ", ";
}
return os;
}
/*****************************************************************************/
void SubgraphBuilderParameters::print() const { print(cout); }
/***************************************************************************************/
void SubgraphBuilderParameters::print(ostream &os) const {
os << "SubgraphBuilderParameters" << endl
<< "skeleton: " << skeletonTranslator(skeletonType) << endl
<< "skeleton weight: " << skeletonWeightTranslator(skeletonWeight)
<< endl
<< "augmentation weight: "
<< augmentationWeightTranslator(augmentationWeight) << endl;
}
/*****************************************************************************/
ostream &operator<<(ostream &os, const SubgraphBuilderParameters &p) {
p.print(os);
return os;
}
/*****************************************************************************/
SubgraphBuilderParameters::Skeleton
SubgraphBuilderParameters::skeletonTranslator(const std::string &src) {
std::string s = src;
boost::algorithm::to_upper(s);
if (s == "NATURALCHAIN")
return NATURALCHAIN;
else if (s == "BFS")
return BFS;
else if (s == "KRUSKAL")
return KRUSKAL;
throw std::invalid_argument(
"SubgraphBuilderParameters::skeletonTranslator undefined string " + s);
return KRUSKAL;
}
/****************************************************************/
std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton s) {
if (s == NATURALCHAIN)
return "NATURALCHAIN";
else if (s == BFS)
return "BFS";
else if (s == KRUSKAL)
return "KRUSKAL";
else
return "UNKNOWN";
}
/****************************************************************/
SubgraphBuilderParameters::SkeletonWeight
SubgraphBuilderParameters::skeletonWeightTranslator(const std::string &src) {
std::string s = src;
boost::algorithm::to_upper(s);
if (s == "EQUAL")
return EQUAL;
else if (s == "RHS")
return RHS_2NORM;
else if (s == "LHS")
return LHS_FNORM;
else if (s == "RANDOM")
return RANDOM;
throw std::invalid_argument(
"SubgraphBuilderParameters::skeletonWeightTranslator undefined string " +
s);
return EQUAL;
}
/****************************************************************/
std::string SubgraphBuilderParameters::skeletonWeightTranslator(
SkeletonWeight w) {
if (w == EQUAL)
return "EQUAL";
else if (w == RHS_2NORM)
return "RHS";
else if (w == LHS_FNORM)
return "LHS";
else if (w == RANDOM)
return "RANDOM";
else
return "UNKNOWN";
}
/****************************************************************/
SubgraphBuilderParameters::AugmentationWeight
SubgraphBuilderParameters::augmentationWeightTranslator(
const std::string &src) {
std::string s = src;
boost::algorithm::to_upper(s);
if (s == "SKELETON") return SKELETON;
// else if (s == "STRETCH") return STRETCH;
// else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH;
throw std::invalid_argument(
"SubgraphBuilder::Parameters::augmentationWeightTranslator undefined "
"string " +
s);
return SKELETON;
}
/****************************************************************/
std::string SubgraphBuilderParameters::augmentationWeightTranslator(
AugmentationWeight w) {
if (w == SKELETON) return "SKELETON";
// else if ( w == STRETCH ) return "STRETCH";
// else if ( w == GENERALIZED_STRETCH ) return "GENERALIZED_STRETCH";
else
return "UNKNOWN";
}
/****************************************************************/
vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const vector<double> &weights) const {
const SubgraphBuilderParameters &p = parameters_;
switch (p.skeletonType) {
case SubgraphBuilderParameters::NATURALCHAIN:
return natural_chain(gfg);
break;
case SubgraphBuilderParameters::BFS:
return bfs(gfg);
break;
case SubgraphBuilderParameters::KRUSKAL:
return kruskal(gfg, ordering, weights);
break;
default:
std::cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl;
break;
}
return vector<size_t>();
}
/****************************************************************/
vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const {
vector<size_t> unaryFactorIndices;
size_t index = 0;
for (const auto &factor : gfg) {
if (factor->size() == 1) {
unaryFactorIndices.push_back(index);
}
index++;
}
return unaryFactorIndices;
}
/****************************************************************/
vector<size_t> SubgraphBuilder::natural_chain(
const GaussianFactorGraph &gfg) const {
vector<size_t> chainFactorIndices;
size_t index = 0;
for (const GaussianFactor::shared_ptr &gf : gfg) {
if (gf->size() == 2) {
const Key k0 = gf->keys()[0], k1 = gf->keys()[1];
if ((k1 - k0) == 1 || (k0 - k1) == 1) chainFactorIndices.push_back(index);
}
index++;
}
return chainFactorIndices;
}
/****************************************************************/
vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
const VariableIndex variableIndex(gfg);
/* start from the first key of the first factor */
Key seed = gfg[0]->keys()[0];
const size_t n = variableIndex.size();
/* each vertex has self as the predecessor */
vector<size_t> bfsFactorIndices;
bfsFactorIndices.reserve(n - 1);
/* Initialize */
std::queue<size_t> q;
q.push(seed);
std::set<size_t> flags;
flags.insert(seed);
/* traversal */
while (!q.empty()) {
const size_t head = q.front();
q.pop();
for (const size_t index : variableIndex[head]) {
const GaussianFactor &gf = *gfg[index];
for (const size_t key : gf.keys()) {
if (flags.count(key) == 0) {
q.push(key);
flags.insert(key);
bfsFactorIndices.push_back(index);
}
}
}
}
return bfsFactorIndices;
}
/****************************************************************/
vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const vector<double> &weights) const {
const VariableIndex variableIndex(gfg);
const size_t n = variableIndex.size();
const vector<size_t> sortedIndices = sort_idx(weights);
/* initialize buffer */
vector<size_t> treeIndices;
treeIndices.reserve(n - 1);
// container for edges sorted in ascending order
DSFVector dsf(n);
size_t count = 0;
double sum = 0.0;
for (const size_t index : sortedIndices) {
const GaussianFactor &gf = *gfg[index];
const auto keys = gf.keys();
if (keys.size() != 2) continue;
const size_t u = ordering.find(keys[0])->second,
v = ordering.find(keys[1])->second;
if (dsf.find(u) != dsf.find(v)) {
dsf.merge(u, v);
treeIndices.push_back(index);
sum += weights[index];
if (++count == n - 1) break;
}
}
return treeIndices;
}
/****************************************************************/
vector<size_t> SubgraphBuilder::sample(const vector<double> &weights,
const size_t t) const {
return UniqueSampler(weights, t);
}
/****************************************************************/
Subgraph SubgraphBuilder::operator()(
const GaussianFactorGraph &gfg) const {
const auto &p = parameters_;
const auto inverse_ordering = Ordering::Natural(gfg);
const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert();
const size_t n = inverse_ordering.size(), m = gfg.size();
// Make sure the subgraph preconditioner does not include more than half of
// the edges beyond the spanning tree, or we might as well solve the whole
// thing.
size_t numExtraEdges = n * p.augmentationFactor;
const size_t numRemaining = m - (n - 1);
numExtraEdges = std::min(numExtraEdges, numRemaining / 2);
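// Worked example with illustrative numbers (not taken from this change): with
// n = 100 variables, m = 300 factors, and augmentationFactor = 1.0,
// numExtraEdges starts at 100, numRemaining = 300 - 99 = 201, and the cap
// keeps min(100, 201 / 2) = min(100, 100) = 100 extra edges.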
// Calculate weights
vector<double> weights = this->weights(gfg);
// Build spanning tree.
const vector<size_t> tree = buildTree(gfg, forward_ordering, weights);
if (tree.size() != n - 1) {
throw std::runtime_error(
"SubgraphBuilder::operator() failure: tree.size() != n-1");
}
// Downweight the tree edges to zero.
for (const size_t index : tree) {
weights[index] = 0.0;
}
/* decide how many edges to augment */
vector<size_t> offTree = sample(weights, numExtraEdges);
vector<size_t> subgraph = unary(gfg);
subgraph.insert(subgraph.end(), tree.begin(), tree.end());
subgraph.insert(subgraph.end(), offTree.begin(), offTree.end());
return Subgraph(subgraph);
}
/****************************************************************/
SubgraphBuilder::Weights SubgraphBuilder::weights(
const GaussianFactorGraph &gfg) const {
const size_t m = gfg.size();
Weights weight;
weight.reserve(m);
for (const GaussianFactor::shared_ptr &gf : gfg) {
switch (parameters_.skeletonWeight) {
case SubgraphBuilderParameters::EQUAL:
weight.push_back(1.0);
break;
case SubgraphBuilderParameters::RHS_2NORM: {
if (JacobianFactor::shared_ptr jf =
boost::dynamic_pointer_cast<JacobianFactor>(gf)) {
weight.push_back(jf->getb().norm());
} else if (HessianFactor::shared_ptr hf =
boost::dynamic_pointer_cast<HessianFactor>(gf)) {
weight.push_back(hf->linearTerm().norm());
}
} break;
case SubgraphBuilderParameters::LHS_FNORM: {
if (JacobianFactor::shared_ptr jf =
boost::dynamic_pointer_cast<JacobianFactor>(gf)) {
weight.push_back(std::sqrt(jf->getA().squaredNorm()));
} else if (HessianFactor::shared_ptr hf =
boost::dynamic_pointer_cast<HessianFactor>(gf)) {
weight.push_back(std::sqrt(hf->information().squaredNorm()));
}
} break;
case SubgraphBuilderParameters::RANDOM:
weight.push_back(std::rand() % 100 + 1.0);
break;
default:
throw std::invalid_argument(
"SubgraphBuilder::weights: undefined weight scheme ");
break;
}
}
return weight;
}
/*****************************************************************************/
GaussianFactorGraph::shared_ptr buildFactorSubgraph(
const GaussianFactorGraph &gfg, const Subgraph &subgraph,
const bool clone) {
auto subgraphFactors = boost::make_shared<GaussianFactorGraph>();
subgraphFactors->reserve(subgraph.size());
for (const auto &e : subgraph) {
const auto factor = gfg[e.index];
subgraphFactors->push_back(clone ? factor->clone(): factor);
}
return subgraphFactors;
}
/**************************************************************************************************/
std::pair<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr> //
splitFactorGraph(const GaussianFactorGraph &factorGraph, const Subgraph &subgraph) {
// Get the subgraph by calling cheaper method
auto subgraphFactors = buildFactorSubgraph(factorGraph, subgraph, false);
// Now, copy all factors then set subGraph factors to zero
auto remaining = boost::make_shared<GaussianFactorGraph>(factorGraph);
for (const auto &e : subgraph) {
remaining->remove(e.index);
}
return std::make_pair(subgraphFactors, remaining);
}
/*****************************************************************************/
} // namespace gtsam

View File

@ -0,0 +1,182 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SubgraphBuilder.h
* @date Dec 31, 2009
* @author Frank Dellaert, Yong-Dian Jian
*/
#pragma once
#include <gtsam/base/FastMap.h>
#include <gtsam/base/types.h>
#include <gtsam/dllexport.h>
#include <boost/serialization/nvp.hpp>
#include <boost/shared_ptr.hpp>
#include <vector>
namespace boost {
namespace serialization {
class access;
} /* namespace serialization */
} /* namespace boost */
namespace gtsam {
// Forward declarations
class GaussianFactorGraph;
struct PreconditionerParameters;
/**************************************************************************/
class GTSAM_EXPORT Subgraph {
public:
struct GTSAM_EXPORT Edge {
size_t index; /* edge id */
double weight; /* edge weight */
inline bool isUnitWeight() const { return (weight == 1.0); }
friend std::ostream &operator<<(std::ostream &os, const Edge &edge);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_NVP(index);
ar &BOOST_SERIALIZATION_NVP(weight);
}
};
typedef std::vector<Edge> Edges;
typedef std::vector<size_t> EdgeIndices;
typedef Edges::iterator iterator;
typedef Edges::const_iterator const_iterator;
protected:
Edges edges_; /* index to the factors */
public:
Subgraph() {}
Subgraph(const Subgraph &subgraph) : edges_(subgraph.edges()) {}
Subgraph(const Edges &edges) : edges_(edges) {}
Subgraph(const std::vector<size_t> &indices);
inline const Edges &edges() const { return edges_; }
inline size_t size() const { return edges_.size(); }
EdgeIndices edgeIndices() const;
iterator begin() { return edges_.begin(); }
const_iterator begin() const { return edges_.begin(); }
iterator end() { return edges_.end(); }
const_iterator end() const { return edges_.end(); }
void save(const std::string &fn) const;
static Subgraph load(const std::string &fn);
friend std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph);
private:
friend class boost::serialization::access;
template <class Archive>
void serialize(Archive &ar, const unsigned int /*version*/) {
ar &BOOST_SERIALIZATION_NVP(edges_);
}
};
/****************************************************************************/
struct GTSAM_EXPORT SubgraphBuilderParameters {
typedef boost::shared_ptr<SubgraphBuilderParameters> shared_ptr;
enum Skeleton {
/* augmented tree */
NATURALCHAIN = 0, /* natural ordering of the graph */
BFS, /* breadth-first search tree */
KRUSKAL, /* maximum weighted spanning tree */
} skeletonType;
enum SkeletonWeight { /* how to weigh the graph edges */
EQUAL = 0, /* every block edge has equal weight */
RHS_2NORM, /* use the 2-norm of the rhs */
LHS_FNORM, /* use the frobenius norm of the lhs */
RANDOM, /* bounded random edge weight */
} skeletonWeight;
enum AugmentationWeight { /* how to weigh the graph edges */
SKELETON = 0, /* use the same weights in building
the skeleton */
// STRETCH, /* stretch in the
// laplacian sense */ GENERALIZED_STRETCH /*
// the generalized stretch defined in
// jian2013iros */
} augmentationWeight;
/// factor multiplied with n, yields number of extra edges.
double augmentationFactor;
SubgraphBuilderParameters()
: skeletonType(KRUSKAL),
skeletonWeight(RANDOM),
augmentationWeight(SKELETON),
augmentationFactor(1.0) {}
virtual ~SubgraphBuilderParameters() {}
/* for serialization */
void print() const;
virtual void print(std::ostream &os) const;
friend std::ostream &operator<<(std::ostream &os,
const PreconditionerParameters &p);
static Skeleton skeletonTranslator(const std::string &s);
static std::string skeletonTranslator(Skeleton s);
static SkeletonWeight skeletonWeightTranslator(const std::string &s);
static std::string skeletonWeightTranslator(SkeletonWeight w);
static AugmentationWeight augmentationWeightTranslator(const std::string &s);
static std::string augmentationWeightTranslator(AugmentationWeight w);
};
/*****************************************************************************/
class GTSAM_EXPORT SubgraphBuilder {
public:
typedef SubgraphBuilder Base;
typedef std::vector<double> Weights;
SubgraphBuilder(
const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
: parameters_(p) {}
virtual ~SubgraphBuilder() {}
virtual Subgraph operator()(const GaussianFactorGraph &jfg) const;
private:
std::vector<size_t> buildTree(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const std::vector<double> &weights) const;
std::vector<size_t> unary(const GaussianFactorGraph &gfg) const;
std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const;
std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const;
std::vector<size_t> kruskal(const GaussianFactorGraph &gfg,
const FastMap<Key, size_t> &ordering,
const std::vector<double> &weights) const;
std::vector<size_t> sample(const std::vector<double> &weights,
const size_t t) const;
Weights weights(const GaussianFactorGraph &gfg) const;
SubgraphBuilderParameters parameters_;
};
/** Select the factors in a factor graph according to the subgraph. */
boost::shared_ptr<GaussianFactorGraph> buildFactorSubgraph(
const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone);
/** Split the graph into a subgraph and the remaining edges.
* Note that the remaining factor graph has null factors. */
std::pair<boost::shared_ptr<GaussianFactorGraph>, boost::shared_ptr<GaussianFactorGraph> >
splitFactorGraph(const GaussianFactorGraph &factorGraph, const Subgraph &subgraph);
} // namespace gtsam
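A hedged end-to-end sketch of the builder declared above; the parameter choices and the input graph are illustrative, not prescriptive:

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SubgraphBuilder.h>

void splitForPreconditioning(const gtsam::GaussianFactorGraph& gfg) {
  gtsam::SubgraphBuilderParameters params;
  params.skeletonType = gtsam::SubgraphBuilderParameters::KRUSKAL;
  params.skeletonWeight = gtsam::SubgraphBuilderParameters::RANDOM;
  params.augmentationFactor = 0.0;  // spanning tree only, no extra edges
  const gtsam::SubgraphBuilder builder(params);
  const gtsam::Subgraph subgraph = builder(gfg);
  const auto graphs = gtsam::splitFactorGraph(gfg, subgraph);
  const auto& subgraphFactors = graphs.first;   // factors selected by the subgraph
  const auto& remainingFactors = graphs.second; // keeps null slots where subgraph factors were removed
  (void)subgraphFactors; (void)remainingFactors;
}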

View File

@ -1,6 +1,6 @@
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation, * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415 * Atlanta, Georgia 30332-0415
* All Rights Reserved * All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list) * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
@ -12,451 +12,85 @@
/** /**
* @file SubgraphPreconditioner.cpp * @file SubgraphPreconditioner.cpp
* @date Dec 31, 2009 * @date Dec 31, 2009
* @author: Frank Dellaert * @author Frank Dellaert, Yong-Dian Jian
*/ */
#include <gtsam/linear/SubgraphPreconditioner.h> #include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/HessianFactor.h> #include <gtsam/linear/SubgraphBuilder.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/JacobianFactor.h> #include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianEliminationTree.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/inference/VariableIndex.h>
#include <gtsam/base/DSFVector.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/FastVector.h>
#include <gtsam/base/types.h> #include <gtsam/base/types.h>
#include <gtsam/base/Vector.h> #include <gtsam/base/Vector.h>
#include <boost/algorithm/string.hpp> #include <boost/algorithm/string.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/vector.hpp>
#include <boost/range/adaptor/reversed.hpp> #include <boost/range/adaptor/reversed.hpp>
#include <boost/shared_ptr.hpp>
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <iterator>
#include <list>
#include <map>
#include <numeric> // accumulate
#include <queue>
#include <set>
#include <stdexcept> #include <stdexcept>
#include <string>
#include <utility>
#include <vector>
using namespace std; using std::cout;
using std::endl;
using std::vector;
using std::ostream;
namespace gtsam { namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
static GaussianFactorGraph::shared_ptr convertToJacobianFactors(const GaussianFactorGraph &gfg) { static Vector getSubvector(const Vector &src, const KeyInfo &keyInfo,
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph()); const KeyVector &keys) {
for(const GaussianFactor::shared_ptr &gf: gfg) { /* a cache of starting index and dim */
JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf); vector<std::pair<size_t, size_t> > cache;
if( !jf ) { cache.reserve(3);
jf = boost::make_shared<JacobianFactor>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
/* figure out dimension by traversing the keys */
size_t dim = 0;
for (const Key &key : keys) {
const KeyInfoEntry &entry = keyInfo.find(key)->second;
cache.emplace_back(entry.start, entry.dim);
dim += entry.dim;
}
/* use the cache to fill the result */
Vector result(dim);
size_t index = 0;
for (const auto &p : cache) {
result.segment(index, p.second) = src.segment(p.first, p.second);
index += p.second;
}
return result;
}
/*****************************************************************************/
static void setSubvector(const Vector &src, const KeyInfo &keyInfo,
const KeyVector &keys, Vector &dst) {
size_t index = 0;
for (const Key &key : keys) {
const KeyInfoEntry &entry = keyInfo.find(key)->second;
const size_t keyDim = entry.dim;
dst.segment(entry.start, keyDim) = src.segment(index, keyDim);
index += keyDim;
}
}
/* ************************************************************************* */
// Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with
// Cholesky)
static GaussianFactorGraph::shared_ptr convertToJacobianFactors(
const GaussianFactorGraph &gfg) {
auto result = boost::make_shared<GaussianFactorGraph>();
for (const auto &factor : gfg)
if (factor) {
auto jf = boost::dynamic_pointer_cast<JacobianFactor>(factor);
if (!jf) {
jf = boost::make_shared<JacobianFactor>(*factor);
} }
result->push_back(jf); result->push_back(jf);
} }
return result; return result;
} }
/*****************************************************************************/
static std::vector<size_t> iidSampler(const vector<double> &weight, const size_t n) {
/* compute the sum of the weights */
const double sum = std::accumulate(weight.begin(), weight.end(), 0.0);
/* make a normalized and accumulated version of the weight vector */
const size_t m = weight.size();
vector<double> w; w.reserve(m);
for ( size_t i = 0 ; i < m ; ++i ) {
w.push_back(weight[i]/sum);
}
vector<double> acc(m);
std::partial_sum(w.begin(),w.end(),acc.begin());
/* iid sample n times */
vector<size_t> result; result.reserve(n);
const double denominator = (double)RAND_MAX;
for ( size_t i = 0 ; i < n ; ++i ) {
const double value = rand() / denominator;
/* binary search the interval containing "value" */
vector<double>::iterator it = std::lower_bound(acc.begin(), acc.end(), value);
size_t idx = it - acc.begin();
result.push_back(idx);
}
return result;
}
/*****************************************************************************/
vector<size_t> uniqueSampler(const vector<double> &weight, const size_t n) {
const size_t m = weight.size();
if ( n > m ) throw std::invalid_argument("uniqueSampler: invalid input size");
vector<size_t> result;
size_t count = 0;
std::vector<bool> touched(m, false);
while ( count < n ) {
std::vector<size_t> localIndices; localIndices.reserve(n-count);
std::vector<double> localWeights; localWeights.reserve(n-count);
/* collect data */
for ( size_t i = 0 ; i < m ; ++i ) {
if ( !touched[i] ) {
localIndices.push_back(i);
localWeights.push_back(weight[i]);
}
}
/* sampling and cache results */
vector<size_t> samples = iidSampler(localWeights, n-count);
for ( const size_t &id: samples ) {
if ( touched[id] == false ) {
touched[id] = true ;
result.push_back(id);
if ( ++count >= n ) break;
}
}
}
return result;
}
/****************************************************************************/
Subgraph::Subgraph(const std::vector<size_t> &indices) {
edges_.reserve(indices.size());
for ( const size_t &idx: indices ) {
edges_.push_back(SubgraphEdge(idx, 1.0));
}
}
/****************************************************************************/
std::vector<size_t> Subgraph::edgeIndices() const {
std::vector<size_t> eid; eid.reserve(size());
for ( const SubgraphEdge &edge: edges_ ) {
eid.push_back(edge.index_);
}
return eid;
}
/****************************************************************************/
void Subgraph::save(const std::string &fn) const {
std::ofstream os(fn.c_str());
boost::archive::text_oarchive oa(os);
oa << *this;
os.close();
}
/****************************************************************************/
Subgraph::shared_ptr Subgraph::load(const std::string &fn) {
std::ifstream is(fn.c_str());
boost::archive::text_iarchive ia(is);
Subgraph::shared_ptr subgraph(new Subgraph());
ia >> *subgraph;
is.close();
return subgraph;
}
/****************************************************************************/
std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge) {
if ( edge.weight() != 1.0 )
os << edge.index() << "(" << std::setprecision(2) << edge.weight() << ")";
else
os << edge.index() ;
return os;
}
/****************************************************************************/
std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph) {
os << "Subgraph" << endl;
for ( const SubgraphEdge &e: subgraph.edges() ) {
os << e << ", " ;
}
return os;
}
/*****************************************************************************/
void SubgraphBuilderParameters::print() const {
print(cout);
}
/***************************************************************************************/
void SubgraphBuilderParameters::print(ostream &os) const {
os << "SubgraphBuilderParameters" << endl
<< "skeleton: " << skeletonTranslator(skeleton_) << endl
<< "skeleton weight: " << skeletonWeightTranslator(skeletonWeight_) << endl
<< "augmentation weight: " << augmentationWeightTranslator(augmentationWeight_) << endl
;
}
/*****************************************************************************/
ostream& operator<<(ostream &os, const SubgraphBuilderParameters &p) {
p.print(os);
return os;
}
/*****************************************************************************/
SubgraphBuilderParameters::Skeleton SubgraphBuilderParameters::skeletonTranslator(const std::string &src){
std::string s = src; boost::algorithm::to_upper(s);
if (s == "NATURALCHAIN") return NATURALCHAIN;
else if (s == "BFS") return BFS;
else if (s == "KRUSKAL") return KRUSKAL;
throw invalid_argument("SubgraphBuilderParameters::skeletonTranslator undefined string " + s);
return KRUSKAL;
}
/****************************************************************/
std::string SubgraphBuilderParameters::skeletonTranslator(Skeleton w) {
if ( w == NATURALCHAIN )return "NATURALCHAIN";
else if ( w == BFS ) return "BFS";
else if ( w == KRUSKAL )return "KRUSKAL";
else return "UNKNOWN";
}
/****************************************************************/
SubgraphBuilderParameters::SkeletonWeight SubgraphBuilderParameters::skeletonWeightTranslator(const std::string &src) {
std::string s = src; boost::algorithm::to_upper(s);
if (s == "EQUAL") return EQUAL;
else if (s == "RHS") return RHS_2NORM;
else if (s == "LHS") return LHS_FNORM;
else if (s == "RANDOM") return RANDOM;
throw invalid_argument("SubgraphBuilderParameters::skeletonWeightTranslator undefined string " + s);
return EQUAL;
}
/****************************************************************/
std::string SubgraphBuilderParameters::skeletonWeightTranslator(SkeletonWeight w) {
if ( w == EQUAL ) return "EQUAL";
else if ( w == RHS_2NORM ) return "RHS";
else if ( w == LHS_FNORM ) return "LHS";
else if ( w == RANDOM ) return "RANDOM";
else return "UNKNOWN";
}
/****************************************************************/
SubgraphBuilderParameters::AugmentationWeight SubgraphBuilderParameters::augmentationWeightTranslator(const std::string &src) {
std::string s = src; boost::algorithm::to_upper(s);
if (s == "SKELETON") return SKELETON;
// else if (s == "STRETCH") return STRETCH;
// else if (s == "GENERALIZED_STRETCH") return GENERALIZED_STRETCH;
throw invalid_argument("SubgraphBuilder::Parameters::augmentationWeightTranslator undefined string " + s);
return SKELETON;
}
/****************************************************************/
std::string SubgraphBuilderParameters::augmentationWeightTranslator(AugmentationWeight w) {
if ( w == SKELETON ) return "SKELETON";
// else if ( w == STRETCH ) return "STRETCH";
// else if ( w == GENERALIZED_STRETCH ) return "GENERALIZED_STRETCH";
else return "UNKNOWN";
}
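/* Example: the string translators are case-insensitive and round-trip with
 * their enum counterparts, e.g.
 *
 *   SubgraphBuilderParameters::skeletonTranslator("kruskal") == SubgraphBuilderParameters::KRUSKAL
 *   SubgraphBuilderParameters::skeletonWeightTranslator(SubgraphBuilderParameters::RHS_2NORM) == "RHS"
 */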
/****************************************************************/
std::vector<size_t> SubgraphBuilder::buildTree(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const {
const SubgraphBuilderParameters &p = parameters_;
switch (p.skeleton_) {
case SubgraphBuilderParameters::NATURALCHAIN:
return natural_chain(gfg);
break;
case SubgraphBuilderParameters::BFS:
return bfs(gfg);
break;
case SubgraphBuilderParameters::KRUSKAL:
return kruskal(gfg, ordering, w);
break;
default:
cerr << "SubgraphBuilder::buildTree undefined skeleton type" << endl;
break;
}
return vector<size_t>();
}
/****************************************************************/
std::vector<size_t> SubgraphBuilder::unary(const GaussianFactorGraph &gfg) const {
std::vector<size_t> result ;
size_t idx = 0;
for ( const GaussianFactor::shared_ptr &gf: gfg ) {
if ( gf->size() == 1 ) {
result.push_back(idx);
}
idx++;
}
return result;
}
/****************************************************************/
std::vector<size_t> SubgraphBuilder::natural_chain(const GaussianFactorGraph &gfg) const {
std::vector<size_t> result ;
size_t idx = 0;
for ( const GaussianFactor::shared_ptr &gf: gfg ) {
if ( gf->size() == 2 ) {
const Key k0 = gf->keys()[0], k1 = gf->keys()[1];
if ( (k1-k0) == 1 || (k0-k1) == 1 )
result.push_back(idx);
}
idx++;
}
return result;
}
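/* Example: with consecutively numbered poses x0, x1, x2, the odometry factors
 * (x0,x1) and (x1,x2) satisfy |k1 - k0| == 1 and are kept, while a
 * loop-closure factor (x0,x2) is skipped. */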
/****************************************************************/
std::vector<size_t> SubgraphBuilder::bfs(const GaussianFactorGraph &gfg) const {
const VariableIndex variableIndex(gfg);
/* start from the first key of the first factor */
Key seed = gfg[0]->keys()[0];
const size_t n = variableIndex.size();
/* factor indices of the spanning-tree edges found by the traversal */
std::vector<size_t> result;
result.reserve(n-1);
/* Initialize */
std::queue<size_t> q;
q.push(seed);
std::set<size_t> flags;
flags.insert(seed);
/* traversal */
while ( !q.empty() ) {
const size_t head = q.front(); q.pop();
for ( const size_t id: variableIndex[head] ) {
const GaussianFactor &gf = *gfg[id];
for ( const size_t key: gf.keys() ) {
if ( flags.count(key) == 0 ) {
q.push(key);
flags.insert(key);
result.push_back(id);
}
}
}
}
return result;
}
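/* Note: a factor index is recorded the first time it reaches an unvisited
 * key, so the returned indices form a spanning tree rooted at the first key
 * of the first factor, with at most n-1 entries. */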
/****************************************************************/
std::vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const {
const VariableIndex variableIndex(gfg);
const size_t n = variableIndex.size();
const vector<size_t> idx = sort_idx(w) ;
/* initialize buffer */
vector<size_t> result;
result.reserve(n-1);
// container for acsendingly sorted edges
DSFVector D(n) ;
size_t count = 0 ; double sum = 0.0 ;
for (const size_t id: idx) {
const GaussianFactor &gf = *gfg[id];
if ( gf.keys().size() != 2 ) continue;
const size_t u = ordering.find(gf.keys()[0])->second,
u_root = D.find(u),
v = ordering.find(gf.keys()[1])->second,
v_root = D.find(v) ;
if ( u_root != v_root ) {
D.merge(u_root, v_root) ;
result.push_back(id) ;
sum += w[id] ;
if ( ++count == n-1 ) break ;
}
}
return result;
}
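/* Worked example sketch: for a simple chain x0-x1-x2 with binary-factor
 * weights w = {0.5, 0.2}, the ascending pass visits the 0.2 edge first; both
 * edges join previously disjoint components, so both factor indices end up in
 * the result and the loop stops after n-1 = 2 edges. */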
/****************************************************************/
std::vector<size_t> SubgraphBuilder::sample(const std::vector<double> &weights, const size_t t) const {
return uniqueSampler(weights, t);
}
/****************************************************************/
Subgraph::shared_ptr SubgraphBuilder::operator() (const GaussianFactorGraph &gfg) const {
const SubgraphBuilderParameters &p = parameters_;
const Ordering inverse_ordering = Ordering::Natural(gfg);
const FastMap<Key, size_t> forward_ordering = inverse_ordering.invert();
const size_t n = inverse_ordering.size(), t = n * p.complexity_ ;
vector<double> w = weights(gfg);
const vector<size_t> tree = buildTree(gfg, forward_ordering, w);
/* sanity check */
if ( tree.size() != n-1 ) {
throw runtime_error("SubgraphBuilder::operator() failed: tree.size() != n-1");
}
/* down weight the tree edges to zero */
for ( const size_t id: tree ) {
w[id] = 0.0;
}
/* sample up to t off-tree edges to augment the spanning tree */
std::vector<size_t> offTree = sample(w, t);
vector<size_t> subgraph = unary(gfg);
subgraph.insert(subgraph.end(), tree.begin(), tree.end());
subgraph.insert(subgraph.end(), offTree.begin(), offTree.end());
return boost::make_shared<Subgraph>(subgraph);
}
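/* Usage sketch (assumes a GaussianFactorGraph `gfg` built elsewhere):
 *
 *   SubgraphBuilderParameters params;            // defaults: KRUSKAL / RANDOM / SKELETON
 *   SubgraphBuilder builder(params);
 *   Subgraph::shared_ptr sub = builder(gfg);     // unary + spanning-tree + sampled off-tree edges
 *   GaussianFactorGraph::shared_ptr Ab1 = buildFactorSubgraph(gfg, *sub, true);
 */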
/****************************************************************/
SubgraphBuilder::Weights SubgraphBuilder::weights(const GaussianFactorGraph &gfg) const
{
const size_t m = gfg.size() ;
Weights weight; weight.reserve(m);
for(const GaussianFactor::shared_ptr &gf: gfg ) {
switch ( parameters_.skeletonWeight_ ) {
case SubgraphBuilderParameters::EQUAL:
weight.push_back(1.0);
break;
case SubgraphBuilderParameters::RHS_2NORM:
{
if ( JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf) ) {
weight.push_back(jf->getb().norm());
}
else if ( HessianFactor::shared_ptr hf = boost::dynamic_pointer_cast<HessianFactor>(gf) ) {
weight.push_back(hf->linearTerm().norm());
}
}
break;
case SubgraphBuilderParameters::LHS_FNORM:
{
if ( JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(gf) ) {
weight.push_back(std::sqrt(jf->getA().squaredNorm()));
}
else if ( HessianFactor::shared_ptr hf = boost::dynamic_pointer_cast<HessianFactor>(gf) ) {
weight.push_back(std::sqrt(hf->information().squaredNorm()));
}
}
break;
case SubgraphBuilderParameters::RANDOM:
weight.push_back(std::rand()%100 + 1.0);
break;
default:
throw invalid_argument("SubgraphBuilder::weights: undefined weight scheme ");
break;
}
}
return weight;
}
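/* Example: under RHS_2NORM a JacobianFactor with b = [3, 4] receives weight 5
 * (the 2-norm of its right-hand side), whereas EQUAL assigns weight 1 to every
 * factor regardless of its contents. */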
/* ************************************************************************* */ /* ************************************************************************* */
SubgraphPreconditioner::SubgraphPreconditioner(const SubgraphPreconditionerParameters &p) : SubgraphPreconditioner::SubgraphPreconditioner(const SubgraphPreconditionerParameters &p) :
parameters_(p) {} parameters_(p) {}
@ -484,9 +118,9 @@ double SubgraphPreconditioner::error(const VectorValues& y) const {
/* ************************************************************************* */ /* ************************************************************************* */
// gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar), // gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const { VectorValues SubgraphPreconditioner::gradient(const VectorValues &y) const {
VectorValues x = Rc1()->backSubstitute(y); /* inv(R1)*y */ VectorValues x = Rc1()->backSubstitute(y); /* inv(R1)*y */
Errors e = (*Ab2()*x - *b2bar()); /* (A2*inv(R1)*y-b2bar) */ Errors e = (*Ab2() * x - *b2bar()); /* (A2*inv(R1)*y-b2bar) */
VectorValues v = VectorValues::Zero(x); VectorValues v = VectorValues::Zero(x);
Ab2()->transposeMultiplyAdd(1.0, e, v); /* A2'*(A2*inv(R1)*y-b2bar) */ Ab2()->transposeMultiplyAdd(1.0, e, v); /* A2'*(A2*inv(R1)*y-b2bar) */
return y + Rc1()->backSubstituteTranspose(v); return y + Rc1()->backSubstituteTranspose(v);
@ -495,7 +129,6 @@ VectorValues SubgraphPreconditioner::gradient(const VectorValues& y) const {
/* ************************************************************************* */ /* ************************************************************************* */
// Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y] // Apply operator A, A*y = [I;A2*inv(R1)]*y = [y; A2*inv(R1)*y]
Errors SubgraphPreconditioner::operator*(const VectorValues& y) const { Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
Errors e(y); Errors e(y);
VectorValues x = Rc1()->backSubstitute(y); /* x=inv(R1)*y */ VectorValues x = Rc1()->backSubstitute(y); /* x=inv(R1)*y */
Errors e2 = *Ab2() * x; /* A2*x */ Errors e2 = *Ab2() * x; /* A2*x */
@ -508,8 +141,10 @@ Errors SubgraphPreconditioner::operator*(const VectorValues& y) const {
void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const { void SubgraphPreconditioner::multiplyInPlace(const VectorValues& y, Errors& e) const {
Errors::iterator ei = e.begin(); Errors::iterator ei = e.begin();
for (size_t i = 0; i < y.size(); ++i, ++ei) for(const auto& key_value: y) {
*ei = y[i]; *ei = key_value.second;
++ei;
}
// Add A2 contribution // Add A2 contribution
VectorValues x = Rc1()->backSubstitute(y); // x=inv(R1)*y VectorValues x = Rc1()->backSubstitute(y); // x=inv(R1)*y
@ -522,8 +157,10 @@ VectorValues SubgraphPreconditioner::operator^(const Errors& e) const {
Errors::const_iterator it = e.begin(); Errors::const_iterator it = e.begin();
VectorValues y = zero(); VectorValues y = zero();
for (size_t i = 0; i < y.size(); ++i, ++it) for(auto& key_value: y) {
y[i] = *it; key_value.second = *it;
++it;
}
transposeMultiplyAdd2(1.0, it, e.end(), y); transposeMultiplyAdd2(1.0, it, e.end(), y);
return y; return y;
} }
@ -534,9 +171,10 @@ void SubgraphPreconditioner::transposeMultiplyAdd
(double alpha, const Errors& e, VectorValues& y) const { (double alpha, const Errors& e, VectorValues& y) const {
Errors::const_iterator it = e.begin(); Errors::const_iterator it = e.begin();
for (size_t i = 0; i < y.size(); ++i, ++it) { for(auto& key_value: y) {
const Vector& ei = *it; const Vector& ei = *it;
axpy(alpha, ei, y[i]); axpy(alpha, ei, key_value.second);
++it;
} }
transposeMultiplyAdd2(alpha, it, e.end(), y); transposeMultiplyAdd2(alpha, it, e.end(), y);
} }
@ -563,47 +201,51 @@ void SubgraphPreconditioner::print(const std::string& s) const {
} }
/*****************************************************************************/ /*****************************************************************************/
void SubgraphPreconditioner::solve(const Vector& y, Vector &x) const void SubgraphPreconditioner::solve(const Vector &y, Vector &x) const {
{ assert(x.size() == y.size());
/* copy first */
std::copy(y.data(), y.data() + y.rows(), x.data());
/* in place back substitute */ /* back substitute */
for (auto cg: boost::adaptors::reverse(*Rc1_)) { for (const auto &cg : boost::adaptors::reverse(*Rc1_)) {
/* collect a subvector of x that consists of the parents of cg (S) */ /* collect a subvector of x that consists of the parents of cg (S) */
const Vector xParent = getSubvector(x, keyInfo_, KeyVector(cg->beginParents(), cg->endParents())); const KeyVector parentKeys(cg->beginParents(), cg->endParents());
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals())); const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals());
const Vector xParent = getSubvector(x, keyInfo_, parentKeys);
const Vector rhsFrontal = getSubvector(y, keyInfo_, frontalKeys);
/* compute the solution for the current pivot */ /* compute the solution for the current pivot */
const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().solve(rhsFrontal - cg->get_S() * xParent); const Vector solFrontal = cg->R().triangularView<Eigen::Upper>().solve(
rhsFrontal - cg->S() * xParent);
/* assign subvector of sol to the frontal variables */ /* assign subvector of sol to the frontal variables */
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x); setSubvector(solFrontal, keyInfo_, frontalKeys, x);
} }
} }
/*****************************************************************************/ /*****************************************************************************/
void SubgraphPreconditioner::transposeSolve(const Vector& y, Vector& x) const void SubgraphPreconditioner::transposeSolve(const Vector &y, Vector &x) const {
{
/* copy first */ /* copy first */
assert(x.size() == y.size());
std::copy(y.data(), y.data() + y.rows(), x.data()); std::copy(y.data(), y.data() + y.rows(), x.data());
/* in place back substitute */ /* in place back substitute */
for(const boost::shared_ptr<GaussianConditional> & cg: *Rc1_) { for (const auto &cg : *Rc1_) {
const Vector rhsFrontal = getSubvector(x, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals())); const KeyVector frontalKeys(cg->beginFrontals(), cg->endFrontals());
// const Vector solFrontal = cg->get_R().triangularView<Eigen::Upper>().transpose().solve(rhsFrontal); const Vector rhsFrontal = getSubvector(x, keyInfo_, frontalKeys);
const Vector solFrontal = cg->get_R().transpose().triangularView<Eigen::Lower>().solve(rhsFrontal); const Vector solFrontal =
cg->R().transpose().triangularView<Eigen::Lower>().solve(
rhsFrontal);
// Check for indeterminate solution // Check for indeterminate solution
if ( solFrontal.hasNaN()) throw IndeterminantLinearSystemException(cg->keys().front()); if (solFrontal.hasNaN())
throw IndeterminantLinearSystemException(cg->keys().front());
/* assign subvector of sol to the frontal variables */ /* assign subvector of sol to the frontal variables */
setSubvector(solFrontal, keyInfo_, KeyVector(cg->beginFrontals(), cg->endFrontals()), x); setSubvector(solFrontal, keyInfo_, frontalKeys, x);
/* subtract from parent variables */ /* subtract from parent variables */
for (GaussianConditional::const_iterator it = cg->beginParents(); it != cg->endParents(); it++) { for (auto it = cg->beginParents(); it != cg->endParents(); it++) {
KeyInfo::const_iterator it2 = keyInfo_.find(*it); const KeyInfoEntry &entry = keyInfo_.find(*it)->second;
Eigen::Map<Vector> rhsParent(x.data()+it2->second.colstart(), it2->second.dim(), 1); Eigen::Map<Vector> rhsParent(x.data() + entry.start, entry.dim, 1);
rhsParent -= Matrix(cg->getA(it)).transpose() * solFrontal; rhsParent -= Matrix(cg->getA(it)).transpose() * solFrontal;
} }
} }
@ -613,67 +255,18 @@ void SubgraphPreconditioner::transposeSolve(const Vector& y, Vector& x) const
void SubgraphPreconditioner::build(const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map<Key,Vector> &lambda) void SubgraphPreconditioner::build(const GaussianFactorGraph &gfg, const KeyInfo &keyInfo, const std::map<Key,Vector> &lambda)
{ {
/* identify the subgraph structure */ /* identify the subgraph structure */
const SubgraphBuilder builder(parameters_.builderParams_); const SubgraphBuilder builder(parameters_.builderParams);
Subgraph::shared_ptr subgraph = builder(gfg); auto subgraph = builder(gfg);
keyInfo_ = keyInfo; keyInfo_ = keyInfo;
/* build factor subgraph */ /* build factor subgraph */
GaussianFactorGraph::shared_ptr gfg_subgraph = buildFactorSubgraph(gfg, *subgraph, true); GaussianFactorGraph::shared_ptr gfg_subgraph = buildFactorSubgraph(gfg, subgraph, true);
/* factorize and cache BayesNet */ /* factorize and cache BayesNet */
Rc1_ = gfg_subgraph->eliminateSequential(); Rc1_ = gfg_subgraph->eliminateSequential();
} }
/*****************************************************************************/ /*****************************************************************************/
Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys) {
/* a cache of starting index and dim */
typedef vector<pair<size_t, size_t> > Cache;
Cache cache;
/* figure out dimension by traversing the keys */
size_t d = 0;
for ( const Key &key: keys ) {
const KeyInfoEntry &entry = keyInfo.find(key)->second;
cache.push_back(make_pair(entry.colstart(), entry.dim()));
d += entry.dim();
}
/* use the cache to fill the result */
Vector result = Vector::Zero(d, 1);
size_t idx = 0;
for ( const Cache::value_type &p: cache ) {
result.segment(idx, p.second) = src.segment(p.first, p.second) ;
idx += p.second;
}
return result;
}
/*****************************************************************************/
void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst) {
/* use the cache */
size_t idx = 0;
for ( const Key &key: keys ) {
const KeyInfoEntry &entry = keyInfo.find(key)->second;
dst.segment(entry.colstart(), entry.dim()) = src.segment(idx, entry.dim()) ;
idx += entry.dim();
}
}
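/* Example sketch: if keyInfo maps key 7 to (colstart 0, dim 3) and key 9 to
 * (colstart 3, dim 2), then getSubvector(x, keyInfo, {9, 7}) returns the
 * 5-vector [x(3:4); x(0:2)], and setSubvector with the same keys writes the
 * segments back to those positions. */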
/*****************************************************************************/
boost::shared_ptr<GaussianFactorGraph>
buildFactorSubgraph(const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone) {
GaussianFactorGraph::shared_ptr result(new GaussianFactorGraph());
result->reserve(subgraph.size());
for ( const SubgraphEdge &e: subgraph ) {
const size_t idx = e.index();
if ( clone ) result->push_back(gfg[idx]->clone());
else result->push_back(gfg[idx]);
}
return result;
}
} // namespace gtsam } // namespace gtsam
@ -1,6 +1,6 @@
/* ---------------------------------------------------------------------------- /* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation, * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415 * Atlanta, Georgia 30332-0415
* All Rights Reserved * All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list) * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
@ -17,30 +17,16 @@
#pragma once #pragma once
#include <gtsam/linear/SubgraphBuilder.h>
#include <gtsam/linear/Errors.h> #include <gtsam/linear/Errors.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/IterativeSolver.h> #include <gtsam/linear/IterativeSolver.h>
#include <gtsam/linear/Preconditioner.h> #include <gtsam/linear/Preconditioner.h>
#include <gtsam/linear/VectorValues.h> #include <gtsam/linear/VectorValues.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/FastVector.h>
#include <gtsam/base/types.h>
#include <gtsam/base/Vector.h>
#include <gtsam/dllexport.h> #include <gtsam/dllexport.h>
#include <boost/serialization/nvp.hpp>
#include <boost/shared_ptr.hpp> #include <boost/shared_ptr.hpp>
#include <map> #include <map>
#include <utility>
#include <vector>
namespace boost {
namespace serialization {
class access;
} /* namespace serialization */
} /* namespace boost */
namespace gtsam { namespace gtsam {
@ -49,142 +35,11 @@ namespace gtsam {
class GaussianFactorGraph; class GaussianFactorGraph;
class VectorValues; class VectorValues;
struct GTSAM_EXPORT SubgraphEdge {
size_t index_; /* edge id */
double weight_; /* edge weight */
SubgraphEdge() : index_(0), weight_(1.0) {}
SubgraphEdge(const SubgraphEdge &e) : index_(e.index()), weight_(e.weight()) {}
SubgraphEdge(const size_t index, const double weight = 1.0): index_(index), weight_(weight) {}
inline size_t index() const { return index_; }
inline double weight() const { return weight_; }
inline bool isUnitWeight() const { return (weight_ == 1.0); }
friend std::ostream &operator<<(std::ostream &os, const SubgraphEdge &edge);
private:
friend class boost::serialization::access;
template<class Archive>
void serialize(Archive & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_NVP(index_);
ar & BOOST_SERIALIZATION_NVP(weight_);
}
};
/**************************************************************************/
class GTSAM_EXPORT Subgraph {
public:
typedef boost::shared_ptr<Subgraph> shared_ptr;
typedef std::vector<shared_ptr> vector_shared_ptr;
typedef std::vector<SubgraphEdge> Edges;
typedef std::vector<size_t> EdgeIndices;
typedef Edges::iterator iterator;
typedef Edges::const_iterator const_iterator;
protected:
Edges edges_; /* index to the factors */
public:
Subgraph() {}
Subgraph(const Subgraph &subgraph) : edges_(subgraph.edges()) {}
Subgraph(const Edges &edges) : edges_(edges) {}
Subgraph(const std::vector<size_t> &indices) ;
inline const Edges& edges() const { return edges_; }
inline size_t size() const { return edges_.size(); }
EdgeIndices edgeIndices() const;
iterator begin() { return edges_.begin(); }
const_iterator begin() const { return edges_.begin(); }
iterator end() { return edges_.end(); }
const_iterator end() const { return edges_.end(); }
void save(const std::string &fn) const;
static shared_ptr load(const std::string &fn);
friend std::ostream &operator<<(std::ostream &os, const Subgraph &subgraph);
private:
friend class boost::serialization::access;
template<class Archive>
void serialize(Archive & ar, const unsigned int /*version*/) {
ar & BOOST_SERIALIZATION_NVP(edges_);
}
};
/****************************************************************************/
struct GTSAM_EXPORT SubgraphBuilderParameters {
public:
typedef boost::shared_ptr<SubgraphBuilderParameters> shared_ptr;
enum Skeleton {
/* augmented tree */
NATURALCHAIN = 0, /* natural ordering of the graph */
BFS, /* breadth-first search tree */
KRUSKAL, /* maximum weighted spanning tree */
} skeleton_ ;
enum SkeletonWeight { /* how to weigh the graph edges */
EQUAL = 0, /* every block edge has equal weight */
RHS_2NORM, /* use the 2-norm of the rhs */
LHS_FNORM, /* use the frobenius norm of the lhs */
RANDOM, /* bounded random edge weight */
} skeletonWeight_ ;
enum AugmentationWeight { /* how to weigh the graph edges */
SKELETON = 0, /* use the same weights in building the skeleton */
// STRETCH, /* stretch in the laplacian sense */
// GENERALIZED_STRETCH /* the generalized stretch defined in jian2013iros */
} augmentationWeight_ ;
double complexity_;
SubgraphBuilderParameters()
: skeleton_(KRUSKAL), skeletonWeight_(RANDOM), augmentationWeight_(SKELETON), complexity_(1.0) {}
virtual ~SubgraphBuilderParameters() {}
/* for serialization */
void print() const ;
virtual void print(std::ostream &os) const ;
friend std::ostream& operator<<(std::ostream &os, const PreconditionerParameters &p);
static Skeleton skeletonTranslator(const std::string &s);
static std::string skeletonTranslator(Skeleton w);
static SkeletonWeight skeletonWeightTranslator(const std::string &s);
static std::string skeletonWeightTranslator(SkeletonWeight w);
static AugmentationWeight augmentationWeightTranslator(const std::string &s);
static std::string augmentationWeightTranslator(AugmentationWeight w);
};
/*****************************************************************************/
class GTSAM_EXPORT SubgraphBuilder {
public:
typedef SubgraphBuilder Base;
typedef boost::shared_ptr<SubgraphBuilder> shared_ptr;
typedef std::vector<double> Weights;
SubgraphBuilder(const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
: parameters_(p) {}
virtual ~SubgraphBuilder() {}
virtual boost::shared_ptr<Subgraph> operator() (const GaussianFactorGraph &jfg) const ;
private:
std::vector<size_t> buildTree(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &weights) const ;
std::vector<size_t> unary(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> natural_chain(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> bfs(const GaussianFactorGraph &gfg) const ;
std::vector<size_t> kruskal(const GaussianFactorGraph &gfg, const FastMap<Key, size_t> &ordering, const std::vector<double> &w) const ;
std::vector<size_t> sample(const std::vector<double> &weights, const size_t t) const ;
Weights weights(const GaussianFactorGraph &gfg) const;
SubgraphBuilderParameters parameters_;
};
/*******************************************************************************************/
struct GTSAM_EXPORT SubgraphPreconditionerParameters : public PreconditionerParameters { struct GTSAM_EXPORT SubgraphPreconditionerParameters : public PreconditionerParameters {
typedef PreconditionerParameters Base;
typedef boost::shared_ptr<SubgraphPreconditionerParameters> shared_ptr; typedef boost::shared_ptr<SubgraphPreconditionerParameters> shared_ptr;
SubgraphPreconditionerParameters(const SubgraphBuilderParameters &p = SubgraphBuilderParameters()) SubgraphPreconditionerParameters(const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
: Base(), builderParams_(p) {} : builderParams(p) {}
virtual ~SubgraphPreconditionerParameters() {} SubgraphBuilderParameters builderParams;
SubgraphBuilderParameters builderParams_;
}; };
/** /**
@ -249,8 +104,8 @@ namespace gtsam {
/* A zero VectorValues with the structure of xbar */ /* A zero VectorValues with the structure of xbar */
VectorValues zero() const { VectorValues zero() const {
VectorValues V(VectorValues::Zero(*xbar_)); assert(xbar_);
return V ; return VectorValues::Zero(*xbar_);
} }
/** /**
@ -285,50 +140,19 @@ namespace gtsam {
/*****************************************************************************/ /*****************************************************************************/
/* implement virtual functions of Preconditioner */ /* implement virtual functions of Preconditioner */
/* Computation Interfaces for Vector */ /// implement x = R^{-1} y
virtual void solve(const Vector& y, Vector &x) const; void solve(const Vector& y, Vector &x) const override;
virtual void transposeSolve(const Vector& y, Vector& x) const ;
virtual void build( /// implement x = R^{-T} y
void transposeSolve(const Vector& y, Vector& x) const override;
/// build/factorize the preconditioner
void build(
const GaussianFactorGraph &gfg, const GaussianFactorGraph &gfg,
const KeyInfo &info, const KeyInfo &info,
const std::map<Key,Vector> &lambda const std::map<Key,Vector> &lambda
) ; ) override;
/*****************************************************************************/ /*****************************************************************************/
}; };
/* get subvectors */
Vector getSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys);
/* set subvectors */
void setSubvector(const Vector &src, const KeyInfo &keyInfo, const KeyVector &keys, Vector &dst);
/* build a factor subgraph, which is defined as a set of weighted edges (factors) */
boost::shared_ptr<GaussianFactorGraph>
buildFactorSubgraph(const GaussianFactorGraph &gfg, const Subgraph &subgraph, const bool clone);
/* sort the container and return permutation index with default comparator */
template <typename Container>
std::vector<size_t> sort_idx(const Container &src)
{
typedef typename Container::value_type T;
const size_t n = src.size() ;
std::vector<std::pair<size_t,T> > tmp;
tmp.reserve(n);
for ( size_t i = 0 ; i < n ; i++ )
tmp.push_back(std::make_pair(i, src[i]));
/* sort by the stored weight; the default pair comparator would compare the
   index first, which is already in order, so the weights must drive the sort */
std::stable_sort(tmp.begin(), tmp.end(),
    [](const std::pair<size_t,T> &a, const std::pair<size_t,T> &b) {
      return a.second < b.second; }) ;
/* copy back */
std::vector<size_t> idx; idx.reserve(n);
for ( size_t i = 0 ; i < n ; i++ ) {
idx.push_back(tmp[i].first) ;
}
return idx;
}
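/* Example: with the weight-based comparison above,
 * sort_idx(std::vector<double>{0.3, 0.1, 0.2}) yields {1, 2, 0}, the positions
 * of the weights in ascending order; stable_sort keeps the original relative
 * order of equal weights. */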
} // namespace gtsam } // namespace gtsam
@ -18,153 +18,94 @@
*/ */
#include <gtsam/linear/SubgraphSolver.h> #include <gtsam/linear/SubgraphSolver.h>
#include <gtsam/linear/SubgraphBuilder.h>
#include <gtsam/linear/GaussianBayesNet.h> #include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/iterative-inl.h> #include <gtsam/linear/iterative-inl.h>
#include <gtsam/linear/GaussianFactorGraph.h> #include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/SubgraphPreconditioner.h> #include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/base/DSFVector.h>
using namespace std; using namespace std;
namespace gtsam { namespace gtsam {
/**************************************************************************************************/ /**************************************************************************************************/
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &gfg, // Just taking system [A|b]
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab,
const Parameters &parameters, const Ordering& ordering) : const Parameters &parameters, const Ordering& ordering) :
parameters_(parameters), ordering_(ordering) { parameters_(parameters) {
initialize(gfg); GaussianFactorGraph::shared_ptr Ab1,Ab2;
std::tie(Ab1, Ab2) = splitGraph(Ab);
if (parameters_.verbosity())
cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size()
<< " factors" << endl;
auto Rc1 = Ab1->eliminateSequential(ordering, EliminateQR);
auto xbar = boost::make_shared<VectorValues>(Rc1->optimize());
pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
} }
/**************************************************************************************************/ /**************************************************************************************************/
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &jfg, // Taking eliminated tree [R1|c] and constraint graph [A2|b2]
const Parameters &parameters, const Ordering& ordering) : SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
parameters_(parameters), ordering_(ordering) { const GaussianFactorGraph::shared_ptr &Ab2,
initialize(*jfg); const Parameters &parameters)
: parameters_(parameters) {
auto xbar = boost::make_shared<VectorValues>(Rc1->optimize());
pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
} }
/**************************************************************************************************/ /**************************************************************************************************/
// Taking subgraphs [A1|b1] and [A2|b2]
// delegate up
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1, SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1,
const GaussianFactorGraph &Ab2, const Parameters &parameters, const GaussianFactorGraph::shared_ptr &Ab2,
const Ordering& ordering) : const Parameters &parameters,
parameters_(parameters), ordering_(ordering) { const Ordering &ordering)
: SubgraphSolver(Ab1.eliminateSequential(ordering, EliminateQR), Ab2,
GaussianBayesNet::shared_ptr Rc1 = Ab1.eliminateSequential(ordering_, parameters) {}
EliminateQR);
initialize(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2));
}
/**************************************************************************************************/
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph::shared_ptr &Ab1,
const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters,
const Ordering& ordering) :
parameters_(parameters), ordering_(ordering) {
GaussianBayesNet::shared_ptr Rc1 = Ab1->eliminateSequential(ordering_,
EliminateQR);
initialize(Rc1, Ab2);
}
/**************************************************************************************************/ /**************************************************************************************************/
// deprecated variants
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1,
const GaussianFactorGraph &Ab2, const Parameters &parameters, const GaussianFactorGraph &Ab2,
const Ordering& ordering) : const Parameters &parameters)
parameters_(parameters), ordering_(ordering) { : SubgraphSolver(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2),
initialize(Rc1, boost::make_shared<GaussianFactorGraph>(Ab2)); parameters) {}
}
SubgraphSolver::SubgraphSolver(const GaussianFactorGraph &Ab1,
const GaussianFactorGraph &Ab2,
const Parameters &parameters,
const Ordering &ordering)
: SubgraphSolver(Ab1, boost::make_shared<GaussianFactorGraph>(Ab2),
parameters, ordering) {}
#endif
/**************************************************************************************************/ /**************************************************************************************************/
SubgraphSolver::SubgraphSolver(const GaussianBayesNet::shared_ptr &Rc1, VectorValues SubgraphSolver::optimize() const {
const GaussianFactorGraph::shared_ptr &Ab2, const Parameters &parameters,
const Ordering& ordering) :
parameters_(parameters), ordering_(ordering) {
initialize(Rc1, Ab2);
}
/**************************************************************************************************/
VectorValues SubgraphSolver::optimize() {
VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues, VectorValues ybar = conjugateGradients<SubgraphPreconditioner, VectorValues,
Errors>(*pc_, pc_->zero(), parameters_); Errors>(*pc_, pc_->zero(), parameters_);
return pc_->x(ybar); return pc_->x(ybar);
} }
/**************************************************************************************************/
VectorValues SubgraphSolver::optimize(const VectorValues &initial) {
// the initial is ignored in this case ...
return optimize();
}
/**************************************************************************************************/
void SubgraphSolver::initialize(const GaussianFactorGraph &jfg) {
GaussianFactorGraph::shared_ptr Ab1 =
boost::make_shared<GaussianFactorGraph>(), Ab2 = boost::make_shared<
GaussianFactorGraph>();
boost::tie(Ab1, Ab2) = splitGraph(jfg);
if (parameters_.verbosity())
cout << "Split A into (A1) " << Ab1->size() << " and (A2) " << Ab2->size()
<< " factors" << endl;
GaussianBayesNet::shared_ptr Rc1 = Ab1->eliminateSequential(ordering_,
EliminateQR);
VectorValues::shared_ptr xbar = boost::make_shared<VectorValues>(
Rc1->optimize());
pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}
/**************************************************************************************************/
void SubgraphSolver::initialize(const GaussianBayesNet::shared_ptr &Rc1,
const GaussianFactorGraph::shared_ptr &Ab2) {
VectorValues::shared_ptr xbar = boost::make_shared<VectorValues>(
Rc1->optimize());
pc_ = boost::make_shared<SubgraphPreconditioner>(Ab2, Rc1, xbar);
}
/**************************************************************************************************/
boost::tuple<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr> //
SubgraphSolver::splitGraph(const GaussianFactorGraph &jfg) {
const VariableIndex index(jfg);
const size_t n = index.size();
DSFVector D(n);
GaussianFactorGraph::shared_ptr At(new GaussianFactorGraph());
GaussianFactorGraph::shared_ptr Ac(new GaussianFactorGraph());
size_t t = 0;
for ( const GaussianFactor::shared_ptr &gf: jfg ) {
if (gf->keys().size() > 2) {
throw runtime_error(
"SubgraphSolver::splitGraph the graph is not simple, sanity check failed ");
}
bool augment = false;
/* check whether this factor should be augmented to the "tree" graph */
if (gf->keys().size() == 1)
augment = true;
else {
const Key u = gf->keys()[0], v = gf->keys()[1], u_root = D.find(u),
v_root = D.find(v);
if (u_root != v_root) {
t++;
augment = true;
D.merge(u_root, v_root);
}
}
if (augment)
At->push_back(gf);
else
Ac->push_back(gf);
}
return boost::tie(At, Ac);
}
/****************************************************************************/
VectorValues SubgraphSolver::optimize(const GaussianFactorGraph &gfg, VectorValues SubgraphSolver::optimize(const GaussianFactorGraph &gfg,
const KeyInfo &keyInfo, const std::map<Key, Vector> &lambda, const KeyInfo &keyInfo, const map<Key, Vector> &lambda,
const VectorValues &initial) { const VectorValues &initial) {
return VectorValues(); return VectorValues();
} }
/**************************************************************************************************/
pair<GaussianFactorGraph::shared_ptr, GaussianFactorGraph::shared_ptr> //
SubgraphSolver::splitGraph(const GaussianFactorGraph &factorGraph) {
/* identify the subgraph structure */
const SubgraphBuilder builder(parameters_.builderParams);
auto subgraph = builder(factorGraph);
/* build factor subgraph */
return splitFactorGraph(factorGraph, subgraph);
}
/****************************************************************************/
} // \namespace gtsam } // \namespace gtsam
@ -20,6 +20,10 @@
#pragma once #pragma once
#include <gtsam/linear/ConjugateGradientSolver.h> #include <gtsam/linear/ConjugateGradientSolver.h>
#include <gtsam/linear/SubgraphBuilder.h>
#include <map>
#include <utility> // pair
namespace gtsam { namespace gtsam {
@ -28,30 +32,36 @@ class GaussianFactorGraph;
class GaussianBayesNet; class GaussianBayesNet;
class SubgraphPreconditioner; class SubgraphPreconditioner;
class GTSAM_EXPORT SubgraphSolverParameters: public ConjugateGradientParameters { struct GTSAM_EXPORT SubgraphSolverParameters
public: : public ConjugateGradientParameters {
typedef ConjugateGradientParameters Base; SubgraphBuilderParameters builderParams;
SubgraphSolverParameters() : explicit SubgraphSolverParameters(const SubgraphBuilderParameters &p = SubgraphBuilderParameters())
Base() { : builderParams(p) {}
} void print() const { Base::print(); }
void print() const {
Base::print();
}
virtual void print(std::ostream &os) const { virtual void print(std::ostream &os) const {
Base::print(os); Base::print(os);
} }
}; };
/** /**
* This class implements the SPCG solver presented in Dellaert et al in IROS'10. * This class implements the linear SPCG solver presented in Dellaert et al in
* IROS'10.
* *
* Given a linear least-squares problem \f$ f(x) = |A x - b|^2 \f$. We split the problem into * Given a linear least-squares problem \f$ f(x) = |A x - b|^2 \f$. We split the
* \f$ f(x) = |A_t - b_t|^2 + |A_c - b_c|^2 \f$ where \f$ A_t \f$ denotes the "tree" part, and \f$ A_c \f$ denotes the "constraint" part. * problem into \f$ f(x) = |A_t - b_t|^2 + |A_c - b_c|^2 \f$ where \f$ A_t \f$
* \f$ A_t \f$ is factorized into \f$ Q_t R_t \f$, and we compute \f$ c_t = Q_t^{-1} b_t \f$, and \f$ x_t = R_t^{-1} c_t \f$ accordingly. * denotes the "tree" part, and \f$ A_c \f$ denotes the "constraint" part.
* Then we solve a reparametrized problem \f$ f(y) = |y|^2 + |A_c R_t^{-1} y - \bar{b_y}|^2 \f$, where \f$ y = R_t(x - x_t) \f$, and \f$ \bar{b_y} = (b_c - A_c x_t) \f$
* *
* In the matrix form, it is equivalent to solving \f$ [A_c R_t^{-1} ; I ] y = [\bar{b_y} ; 0] \f$. We can solve it * \f$A_t \f$ is factorized into \f$ Q_t R_t \f$, and we compute
* with the least-squares variation of the conjugate gradient method. * \f$ c_t = Q_t^{-1} b_t \f$, and \f$ x_t = R_t^{-1} c_t \f$ accordingly.
*
* Then we solve a reparametrized problem
* \f$ f(y) = |y|^2 + |A_c R_t^{-1} y -\bar{b_y}|^2 \f$,
* where \f$ y = R_t(x - x_t) \f$, and \f$ \bar{b_y} = (b_c - A_c x_t) \f$
*
* In the matrix form, it is equivalent to solving
* \f$ [A_c R_t^{-1} ; I ] y = [\bar{b_y} ; 0] \f$.
* We can solve it with the least-squares variation of the conjugate gradient
* method.
* *
* To use it in nonlinear optimization, please see the following example * To use it in nonlinear optimization, please see the following example
* *
@ -63,72 +73,86 @@ public:
* *
* \nosubgrouping * \nosubgrouping
*/ */
class GTSAM_EXPORT SubgraphSolver: public IterativeSolver { class GTSAM_EXPORT SubgraphSolver : public IterativeSolver {
public:
public:
typedef SubgraphSolverParameters Parameters; typedef SubgraphSolverParameters Parameters;
protected: protected:
Parameters parameters_; Parameters parameters_;
Ordering ordering_;
boost::shared_ptr<SubgraphPreconditioner> pc_; ///< preconditioner object boost::shared_ptr<SubgraphPreconditioner> pc_; ///< preconditioner object
public: public:
/// @name Constructors
/// Given a gaussian factor graph, split it into a spanning tree (A1) + others (A2) for SPCG /// @{
/**
* Given a gaussian factor graph, split it into a spanning tree (A1) + others
* (A2) for SPCG Will throw exception if there are ternary factors or higher
* arity, as we use Kruskal's algorithm to split the graph, treating binary
* factors as edges.
*/
SubgraphSolver(const GaussianFactorGraph &A, const Parameters &parameters, SubgraphSolver(const GaussianFactorGraph &A, const Parameters &parameters,
const Ordering& ordering); const Ordering &ordering);
/// Shared pointer version
SubgraphSolver(const boost::shared_ptr<GaussianFactorGraph> &A,
const Parameters &parameters, const Ordering& ordering);
/** /**
* The user specify the subgraph part and the constraint part * The user specifies the subgraph part and the constraints part.
* may throw exception if A1 is underdetermined * May throw exception if A1 is underdetermined. An ordering is required to
* eliminate Ab1. We take Ab1 as a const reference, as it will be transformed
* into Rc1, but take Ab2 as a shared pointer as we need to keep it around.
*/ */
SubgraphSolver(const GaussianFactorGraph &Ab1, const GaussianFactorGraph &Ab2, SubgraphSolver(const GaussianFactorGraph &Ab1,
const Parameters &parameters, const Ordering& ordering);
/// Shared pointer version
SubgraphSolver(const boost::shared_ptr<GaussianFactorGraph> &Ab1,
const boost::shared_ptr<GaussianFactorGraph> &Ab2, const boost::shared_ptr<GaussianFactorGraph> &Ab2,
const Parameters &parameters, const Ordering& ordering); const Parameters &parameters, const Ordering &ordering);
/**
/* The same as above, but the A1 is solved before */ * The same as above, but we assume A1 was solved by caller.
SubgraphSolver(const boost::shared_ptr<GaussianBayesNet> &Rc1, * We take two shared pointers as we keep both around.
const GaussianFactorGraph &Ab2, const Parameters &parameters, */
const Ordering& ordering);
/// Shared pointer version
SubgraphSolver(const boost::shared_ptr<GaussianBayesNet> &Rc1, SubgraphSolver(const boost::shared_ptr<GaussianBayesNet> &Rc1,
const boost::shared_ptr<GaussianFactorGraph> &Ab2, const boost::shared_ptr<GaussianFactorGraph> &Ab2,
const Parameters &parameters, const Ordering& ordering); const Parameters &parameters);
/// Destructor /// Destructor
virtual ~SubgraphSolver() { virtual ~SubgraphSolver() {}
}
/// @}
/// @name Implement interface
/// @{
/// Optimize from zero /// Optimize from zero
VectorValues optimize(); VectorValues optimize() const;
/// Optimize from given initial values /// Interface that IterativeSolver subclasses have to implement
VectorValues optimize(const VectorValues &initial); VectorValues optimize(const GaussianFactorGraph &gfg,
const KeyInfo &keyInfo,
const std::map<Key, Vector> &lambda,
const VectorValues &initial) override;
/** Interface that IterativeSolver subclasses have to implement */ /// @}
virtual VectorValues optimize(const GaussianFactorGraph &gfg, /// @name Implement interface
const KeyInfo &keyInfo, const std::map<Key, Vector> &lambda, /// @{
const VectorValues &initial);
protected: /// Split graph using Kruskal algorithm, treating binary factors as edges.
std::pair < boost::shared_ptr<GaussianFactorGraph>,
boost::shared_ptr<GaussianFactorGraph> > splitGraph(
const GaussianFactorGraph &gfg);
void initialize(const GaussianFactorGraph &jfg); /// @}
void initialize(const boost::shared_ptr<GaussianBayesNet> &Rc1,
const boost::shared_ptr<GaussianFactorGraph> &Ab2);
boost::tuple<boost::shared_ptr<GaussianFactorGraph>, #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
boost::shared_ptr<GaussianFactorGraph> > /// @name Deprecated
splitGraph(const GaussianFactorGraph &gfg); /// @{
SubgraphSolver(const boost::shared_ptr<GaussianFactorGraph> &A,
const Parameters &parameters, const Ordering &ordering)
: SubgraphSolver(*A, parameters, ordering) {}
SubgraphSolver(const GaussianFactorGraph &, const GaussianFactorGraph &,
const Parameters &, const Ordering &);
SubgraphSolver(const boost::shared_ptr<GaussianFactorGraph> &Ab1,
const boost::shared_ptr<GaussianFactorGraph> &Ab2,
const Parameters &parameters, const Ordering &ordering)
: SubgraphSolver(*Ab1, Ab2, parameters, ordering) {}
SubgraphSolver(const boost::shared_ptr<GaussianBayesNet> &,
const GaussianFactorGraph &, const Parameters &);
/// @}
#endif
}; };
} // namespace gtsam } // namespace gtsam
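/* Usage sketch (hypothetical graph `Ab`; the calls mirror the declarations above):
 *
 *   SubgraphSolverParameters params;
 *   Ordering ordering = Ordering::Natural(Ab);
 *   SubgraphSolver solver(Ab, params, ordering);
 *   VectorValues x = solver.optimize();          // SPCG solution from zero
 */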
@ -51,7 +51,7 @@ namespace gtsam {
Key key; Key key;
size_t n; size_t n;
boost::tie(key, n) = v; boost::tie(key, n) = v;
values_.insert(make_pair(key, x.segment(j, n))); values_.emplace(key, x.segment(j, n));
j += n; j += n;
} }
} }
@ -60,7 +60,7 @@ namespace gtsam {
VectorValues::VectorValues(const Vector& x, const Scatter& scatter) { VectorValues::VectorValues(const Vector& x, const Scatter& scatter) {
size_t j = 0; size_t j = 0;
for (const SlotEntry& v : scatter) { for (const SlotEntry& v : scatter) {
values_.insert(make_pair(v.key, x.segment(j, v.dimension))); values_.emplace(v.key, x.segment(j, v.dimension));
j += v.dimension; j += v.dimension;
} }
} }
@ -70,14 +70,12 @@ namespace gtsam {
{ {
VectorValues result; VectorValues result;
for(const KeyValuePair& v: other) for(const KeyValuePair& v: other)
result.values_.insert(make_pair(v.first, Vector::Zero(v.second.size()))); result.values_.emplace(v.first, Vector::Zero(v.second.size()));
return result; return result;
} }
/* ************************************************************************* */ /* ************************************************************************* */
VectorValues::iterator VectorValues::insert(const std::pair<Key, Vector>& key_value) { VectorValues::iterator VectorValues::insert(const std::pair<Key, Vector>& key_value) {
// Note that here we accept a pair with a reference to the Vector, but the Vector is copied as
// it is inserted into the values_ map.
std::pair<iterator, bool> result = values_.insert(key_value); std::pair<iterator, bool> result = values_.insert(key_value);
if(!result.second) if(!result.second)
throw std::invalid_argument( throw std::invalid_argument(
@ -86,6 +84,16 @@ namespace gtsam {
return result.first; return result.first;
} }
/* ************************************************************************* */
VectorValues::iterator VectorValues::emplace(Key j, const Vector& value) {
std::pair<iterator, bool> result = values_.emplace(j, value);
if(!result.second)
throw std::invalid_argument(
"Requested to emplace variable '" + DefaultKeyFormatter(j)
+ "' already in this VectorValues.");
return result.first;
}
/* ************************************************************************* */ /* ************************************************************************* */
void VectorValues::update(const VectorValues& values) void VectorValues::update(const VectorValues& values)
{ {
@ -132,8 +140,7 @@ namespace gtsam {
bool VectorValues::equals(const VectorValues& x, double tol) const { bool VectorValues::equals(const VectorValues& x, double tol) const {
if(this->size() != x.size()) if(this->size() != x.size())
return false; return false;
typedef boost::tuple<value_type, value_type> ValuePair; for(const auto& values: boost::combine(*this, x)) {
for(const ValuePair& values: boost::combine(*this, x)) {
if(values.get<0>().first != values.get<1>().first || if(values.get<0>().first != values.get<1>().first ||
!equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol)) !equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol))
return false; return false;
@ -142,17 +149,15 @@ namespace gtsam {
} }
/* ************************************************************************* */ /* ************************************************************************* */
Vector VectorValues::vector() const Vector VectorValues::vector() const {
{
// Count dimensions // Count dimensions
DenseIndex totalDim = 0; DenseIndex totalDim = 0;
for(const Vector& v: *this | map_values) for (const Vector& v : *this | map_values) totalDim += v.size();
totalDim += v.size();
// Copy vectors // Copy vectors
Vector result(totalDim); Vector result(totalDim);
DenseIndex pos = 0; DenseIndex pos = 0;
for(const Vector& v: *this | map_values) { for (const Vector& v : *this | map_values) {
result.segment(pos, v.size()) = v; result.segment(pos, v.size()) = v;
pos += v.size(); pos += v.size();
} }
@ -242,7 +247,7 @@ namespace gtsam {
VectorValues result; VectorValues result;
// The result.end() hint here should result in constant-time inserts // The result.end() hint here should result in constant-time inserts
for(const_iterator j1 = begin(), j2 = c.begin(); j1 != end(); ++j1, ++j2) for(const_iterator j1 = begin(), j2 = c.begin(); j1 != end(); ++j1, ++j2)
result.values_.insert(result.end(), make_pair(j1->first, j1->second + j2->second)); result.values_.emplace(j1->first, j1->second + j2->second);
return result; return result;
} }
@ -300,7 +305,7 @@ namespace gtsam {
VectorValues result; VectorValues result;
// The result.end() hint here should result in constant-time inserts // The result.end() hint here should result in constant-time inserts
for(const_iterator j1 = begin(), j2 = c.begin(); j1 != end(); ++j1, ++j2) for(const_iterator j1 = begin(), j2 = c.begin(); j1 != end(); ++j1, ++j2)
result.values_.insert(result.end(), make_pair(j1->first, j1->second - j2->second)); result.values_.emplace(j1->first, j1->second - j2->second);
return result; return result;
} }
@ -316,7 +321,7 @@ namespace gtsam {
{ {
VectorValues result; VectorValues result;
for(const VectorValues::KeyValuePair& key_v: v) for(const VectorValues::KeyValuePair& key_v: v)
result.values_.insert(result.values_.end(), make_pair(key_v.first, a * key_v.second)); result.values_.emplace(key_v.first, a * key_v.second);
return result; return result;
} }
@ -50,9 +50,9 @@ namespace gtsam {
* Example: * Example:
* \code * \code
VectorValues values; VectorValues values;
values.insert(3, Vector3(1.0, 2.0, 3.0)); values.emplace(3, Vector3(1.0, 2.0, 3.0));
values.insert(4, Vector2(4.0, 5.0)); values.emplace(4, Vector2(4.0, 5.0));
values.insert(0, (Vector(4) << 6.0, 7.0, 8.0, 9.0).finished()); values.emplace(0, (Vector(4) << 6.0, 7.0, 8.0, 9.0).finished());
// Prints [ 3.0 4.0 ] // Prints [ 3.0 4.0 ]
gtsam::print(values[1]); gtsam::print(values[1]);
@ -64,18 +64,7 @@ namespace gtsam {
* *
* <h2>Advanced Interface and Performance Information</h2> * <h2>Advanced Interface and Performance Information</h2>
* *
* For advanced usage, or where speed is important: * Access is through the variable Key j, and returns a SubVector,
* - Allocate space ahead of time using a pre-allocating constructor
* (\ref AdvancedConstructors "Advanced Constructors"), Zero(),
* SameStructure(), resize(), or append(). Do not use
* insert(Key, const Vector&), which always has to re-allocate the
* internal vector.
* - The vector() function permits access to the underlying Vector, for
* doing mathematical or other operations that require all values.
* - operator[]() returns a SubVector view of the underlying Vector,
* without copying any data.
*
* Access is through the variable index j, and returns a SubVector,
* which is a view on the underlying data structure. * which is a view on the underlying data structure.
* *
* This class is additionally used in gradient descent and dog leg to store the gradient. * This class is additionally used in gradient descent and dog leg to store the gradient.
@ -183,15 +172,21 @@ namespace gtsam {
* j is already used. * j is already used.
* @param value The vector to be inserted. * @param value The vector to be inserted.
* @param j The index with which the value will be associated. */ * @param j The index with which the value will be associated. */
iterator insert(Key j, const Vector& value) { iterator insert(const std::pair<Key, Vector>& key_value);
return insert(std::make_pair(j, value));
} /** Emplace a vector \c value with key \c j. Throws an invalid_argument exception if the key \c
* j is already used.
* @param value The vector to be inserted.
* @param j The index with which the value will be associated. */
iterator emplace(Key j, const Vector& value);
/** Insert a vector \c value with key \c j. Throws an invalid_argument exception if the key \c /** Insert a vector \c value with key \c j. Throws an invalid_argument exception if the key \c
* j is already used. * j is already used.
* @param value The vector to be inserted. * @param value The vector to be inserted.
* @param j The index with which the value will be associated. */ * @param j The index with which the value will be associated. */
iterator insert(const std::pair<Key, Vector>& key_value); iterator insert(Key j, const Vector& value) {
return insert(std::make_pair(j, value));
}
/** Insert all values from \c values. Throws an invalid_argument exception if any keys to be /** Insert all values from \c values. Throws an invalid_argument exception if any keys to be
* inserted are already used. */ * inserted are already used. */
@ -202,7 +197,8 @@ namespace gtsam {
* exist, it is inserted and an iterator pointing to the new element, along with 'true', is * exist, it is inserted and an iterator pointing to the new element, along with 'true', is
* returned. */ * returned. */
std::pair<iterator, bool> tryInsert(Key j, const Vector& value) { std::pair<iterator, bool> tryInsert(Key j, const Vector& value) {
return values_.insert(std::make_pair(j, value)); } return values_.emplace(j, value);
}
/** Erase the vector with the given key, or throw std::out_of_range if it does not exist */ /** Erase the vector with the given key, or throw std::out_of_range if it does not exist */
void erase(Key var) { void erase(Key var) {
@ -56,7 +56,7 @@ namespace gtsam
myData.parentData = parentData; myData.parentData = parentData;
// Take any ancestor results we'll need // Take any ancestor results we'll need
for(Key parent: clique->conditional_->parents()) for(Key parent: clique->conditional_->parents())
myData.cliqueResults.insert(std::make_pair(parent, myData.parentData->cliqueResults.at(parent))); myData.cliqueResults.emplace(parent, myData.parentData->cliqueResults.at(parent));
// Solve and store in our results // Solve and store in our results
{ {
@ -87,10 +87,10 @@ namespace gtsam
// This is because Eigen (as of 3.3) no longer evaluates S * xS into // a temporary, and the operation trashes values in xS.
// a temporary, and the operation trashes values in xS. // See: http://eigen.tuxfamily.org/index.php?title=3.3
// See: http://eigen.tuxfamily.org/index.php?title=3.3 // See: http://eigen.tuxfamily.org/index.php?title=3.3
const Vector rhs = c.getb() - c.get_S() * xS; const Vector rhs = c.getb() - c.S() * xS;
// TODO(gareth): Inline instantiation of Eigen::Solve and check flag // TODO(gareth): Inline instantiation of Eigen::Solve and check flag
const Vector solution = c.get_R().triangularView<Eigen::Upper>().solve(rhs); const Vector solution = c.R().triangularView<Eigen::Upper>().solve(rhs);
// Check for indeterminate solution // Check for indeterminate solution
if(solution.hasNaN()) throw IndeterminantLinearSystemException(c.keys().front()); if(solution.hasNaN()) throw IndeterminantLinearSystemException(c.keys().front());
@ -99,8 +99,8 @@ namespace gtsam
DenseIndex vectorPosition = 0; DenseIndex vectorPosition = 0;
for(GaussianConditional::const_iterator frontal = c.beginFrontals(); frontal != c.endFrontals(); ++frontal) { for(GaussianConditional::const_iterator frontal = c.beginFrontals(); frontal != c.endFrontals(); ++frontal) {
VectorValues::const_iterator r = VectorValues::const_iterator r =
collectedResult.insert(*frontal, solution.segment(vectorPosition, c.getDim(frontal))); collectedResult.emplace(*frontal, solution.segment(vectorPosition, c.getDim(frontal)));
myData.cliqueResults.insert(make_pair(r->first, r)); myData.cliqueResults.emplace(r->first, r);
vectorPosition += c.getDim(frontal); vectorPosition += c.getDim(frontal);
} }
} }
@ -21,6 +21,8 @@
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/numericalDerivative.h> #include <gtsam/base/numericalDerivative.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/tuple/tuple.hpp>
#include <boost/assign/list_of.hpp> #include <boost/assign/list_of.hpp>
#include <boost/assign/std/list.hpp> // for operator += #include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign; using namespace boost::assign;
@ -28,13 +30,11 @@ using namespace boost::assign;
// STL/C++ // STL/C++
#include <iostream> #include <iostream>
#include <sstream> #include <sstream>
#include <CppUnitLite/TestHarness.h>
#include <boost/tuple/tuple.hpp>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
static const Key _x_=0, _y_=1; static const Key _x_ = 11, _y_ = 22, _z_ = 33;
static GaussianBayesNet smallBayesNet = static GaussianBayesNet smallBayesNet =
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))( list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1))(
@ -42,9 +42,9 @@ static GaussianBayesNet smallBayesNet =
static GaussianBayesNet noisyBayesNet = static GaussianBayesNet noisyBayesNet =
list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1, list_of(GaussianConditional(_x_, Vector1::Constant(9), I_1x1, _y_, I_1x1,
noiseModel::Diagonal::Sigmas(Vector1::Constant(2))))( noiseModel::Isotropic::Sigma(1, 2.0)))(
GaussianConditional(_y_, Vector1::Constant(5), I_1x1, GaussianConditional(_y_, Vector1::Constant(5), I_1x1,
noiseModel::Diagonal::Sigmas(Vector1::Constant(3)))); noiseModel::Isotropic::Sigma(1, 3.0)));
/* ************************************************************************* */ /* ************************************************************************* */
TEST( GaussianBayesNet, Matrix ) TEST( GaussianBayesNet, Matrix )
@ -136,6 +136,37 @@ TEST( GaussianBayesNet, optimize3 )
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
} }
/* ************************************************************************* */
TEST(GaussianBayesNet, ordering)
{
Ordering expected;
expected += _x_, _y_;
const auto actual = noisyBayesNet.ordering();
EXPECT(assert_equal(expected, actual));
}
/* ************************************************************************* */
TEST( GaussianBayesNet, MatrixStress )
{
GaussianBayesNet bn;
using GC = GaussianConditional;
bn.emplace_shared<GC>(_x_, Vector2(1, 2), 1 * I_2x2, _y_, 2 * I_2x2, _z_, 3 * I_2x2);
bn.emplace_shared<GC>(_y_, Vector2(3, 4), 4 * I_2x2, _z_, 5 * I_2x2);
bn.emplace_shared<GC>(_z_, Vector2(5, 6), 6 * I_2x2);
const VectorValues expected = bn.optimize();
for (const auto keys :
{KeyVector({_x_, _y_, _z_}), KeyVector({_x_, _z_, _y_}),
KeyVector({_y_, _x_, _z_}), KeyVector({_y_, _z_, _x_}),
KeyVector({_z_, _x_, _y_}), KeyVector({_z_, _y_, _x_})}) {
const Ordering ordering(keys);
Matrix R;
Vector d;
boost::tie(R, d) = bn.matrix(ordering);
EXPECT(assert_equal(expected.vector(ordering), R.inverse() * d));
}
}
/* ************************************************************************* */ /* ************************************************************************* */
TEST( GaussianBayesNet, backSubstituteTranspose ) TEST( GaussianBayesNet, backSubstituteTranspose )
{ {
@ -152,6 +183,34 @@ TEST( GaussianBayesNet, backSubstituteTranspose )
VectorValues actual = smallBayesNet.backSubstituteTranspose(x); VectorValues actual = smallBayesNet.backSubstituteTranspose(x);
EXPECT(assert_equal(expected, actual)); EXPECT(assert_equal(expected, actual));
const auto ordering = noisyBayesNet.ordering();
const Matrix R = smallBayesNet.matrix(ordering).first;
const Vector expected_vector = R.transpose().inverse() * x.vector(ordering);
EXPECT(assert_equal(expected_vector, actual.vector(ordering)));
}
/* ************************************************************************* */
TEST( GaussianBayesNet, backSubstituteTransposeNoisy )
{
// x=R'*y, expected=inv(R')*x
// 2 = 1 2
// 5 1 1 3
VectorValues
x = map_list_of<Key, Vector>
(_x_, Vector1::Constant(2))
(_y_, Vector1::Constant(5)),
expected = map_list_of<Key, Vector>
(_x_, Vector1::Constant(4))
(_y_, Vector1::Constant(9));
VectorValues actual = noisyBayesNet.backSubstituteTranspose(x);
EXPECT(assert_equal(expected, actual));
const auto ordering = noisyBayesNet.ordering();
const Matrix R = noisyBayesNet.matrix(ordering).first;
const Vector expected_vector = R.transpose().inverse() * x.vector(ordering);
EXPECT(assert_equal(expected_vector, actual.vector(ordering)));
} }
/* ************************************************************************* */ /* ************************************************************************* */
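The new assertions above pin down what backSubstituteTranspose computes: given the (whitened) upper-triangular R returned by matrix(ordering), it solves R^T * y = x. A small Eigen-only check using the noisyBayesNet numbers from the test (sigmas 2 and 3, so the whitened R is assumed to be [[0.5, 0.5], [0, 1/3]]):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d R;
  R << 0.5, 0.5,
       0.0, 1.0 / 3.0;        // whitened R of noisyBayesNet (assumption)
  const Eigen::Vector2d x(2, 5);
  // backSubstituteTranspose solves R^T * y = x; R^T is lower triangular.
  const Eigen::Vector2d y =
      R.transpose().triangularView<Eigen::Lower>().solve(x);
  std::cout << y.transpose() << std::endl;  // expect [4, 9], as in the test
  return 0;
}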

View File

@ -71,33 +71,33 @@ TEST(GaussianConditional, constructor)
GaussianConditional::const_iterator it = actual.beginFrontals(); GaussianConditional::const_iterator it = actual.beginFrontals();
EXPECT(assert_equal(Key(1), *it)); EXPECT(assert_equal(Key(1), *it));
EXPECT(assert_equal(R, actual.get_R())); EXPECT(assert_equal(R, actual.R()));
++ it; ++ it;
EXPECT(it == actual.endFrontals()); EXPECT(it == actual.endFrontals());
it = actual.beginParents(); it = actual.beginParents();
EXPECT(assert_equal(Key(3), *it)); EXPECT(assert_equal(Key(3), *it));
EXPECT(assert_equal(S1, actual.get_S(it))); EXPECT(assert_equal(S1, actual.S(it)));
++ it; ++ it;
EXPECT(assert_equal(Key(5), *it)); EXPECT(assert_equal(Key(5), *it));
EXPECT(assert_equal(S2, actual.get_S(it))); EXPECT(assert_equal(S2, actual.S(it)));
++ it; ++ it;
EXPECT(assert_equal(Key(7), *it)); EXPECT(assert_equal(Key(7), *it));
EXPECT(assert_equal(S3, actual.get_S(it))); EXPECT(assert_equal(S3, actual.S(it)));
++it; ++it;
EXPECT(it == actual.endParents()); EXPECT(it == actual.endParents());
EXPECT(assert_equal(d, actual.get_d())); EXPECT(assert_equal(d, actual.d()));
EXPECT(assert_equal(*s, *actual.get_model())); EXPECT(assert_equal(*s, *actual.get_model()));
// test copy constructor // test copy constructor
GaussianConditional copied(actual); GaussianConditional copied(actual);
EXPECT(assert_equal(d, copied.get_d())); EXPECT(assert_equal(d, copied.d()));
EXPECT(assert_equal(*s, *copied.get_model())); EXPECT(assert_equal(*s, *copied.get_model()));
EXPECT(assert_equal(R, copied.get_R())); EXPECT(assert_equal(R, copied.R()));
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -212,7 +212,7 @@ TEST( GaussianConditional, solve_multifrontal )
// 3 variables, all dim=2 // 3 variables, all dim=2
GaussianConditional cg(list_of(1)(2)(10), 2, blockMatrix); GaussianConditional cg(list_of(1)(2)(10), 2, blockMatrix);
EXPECT(assert_equal(Vector(blockMatrix.full().rightCols(1)), cg.get_d())); EXPECT(assert_equal(Vector(blockMatrix.full().rightCols(1)), cg.d()));
// partial solution // partial solution
Vector sl1 = Vector2(9.0, 10.0); Vector sl1 = Vector2(9.0, 10.0);

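The accessor rename exercised above (get_R()/get_S()/get_d() to R()/S()/d()) applies to all call sites; the noise-model accessor get_model() is unchanged. A short usage sketch, with the header path and constructor overload taken from these tests (numbers are illustrative):

#include <gtsam/linear/GaussianConditional.h>
#include <iostream>

using namespace gtsam;

int main() {
  // p(x0 | x1): 1*x0 + 1*x1 = 9, as in the small Bayes net used in the tests.
  GaussianConditional conditional(0, Vector1::Constant(9), I_1x1, 1, I_1x1);
  std::cout << conditional.R() << "\n";              // frontal block (was get_R())
  std::cout << conditional.S() << "\n";              // stacked parent blocks (was get_S())
  std::cout << conditional.d().transpose() << "\n";  // right-hand side (was get_d())
  return 0;
}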
View File

@ -33,7 +33,7 @@ TEST(GaussianDensity, constructor)
GaussianConditional conditional(1, d, R, noiseModel::Diagonal::Sigmas(s)); GaussianConditional conditional(1, d, R, noiseModel::Diagonal::Sigmas(s));
GaussianDensity copied(conditional); GaussianDensity copied(conditional);
EXPECT(assert_equal(d, copied.get_d())); EXPECT(assert_equal(d, copied.d()));
EXPECT(assert_equal(s, copied.get_model()->sigmas())); EXPECT(assert_equal(s, copied.get_model()->sigmas()));
} }

View File

@ -146,7 +146,7 @@ TEST(JacobianFactor, constructors_and_accessors)
/* ************************************************************************* */ /* ************************************************************************* */
TEST(JabobianFactor, Hessian_conversion) { TEST(JabobianFactor, Hessian_conversion) {
HessianFactor hessian(0, (Matrix(4,4) << HessianFactor hessian(0, (Matrix(4, 4) <<
1.57, 2.695, -1.1, -2.35, 1.57, 2.695, -1.1, -2.35,
2.695, 11.3125, -0.65, -10.225, 2.695, 11.3125, -0.65, -10.225,
-1.1, -0.65, 1, 0.5, -1.1, -0.65, 1, 0.5,
@ -154,7 +154,7 @@ TEST(JabobianFactor, Hessian_conversion) {
(Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(), (Vector(4) << -7.885, -28.5175, 2.75, 25.675).finished(),
73.1725); 73.1725);
JacobianFactor expected(0, (Matrix(2,4) << JacobianFactor expected(0, (Matrix(2, 4) <<
1.2530, 2.1508, -0.8779, -1.8755, 1.2530, 2.1508, -0.8779, -1.8755,
0, 2.5858, 0.4789, -2.3943).finished(), 0, 2.5858, 0.4789, -2.3943).finished(),
Vector2(-6.2929, -5.7941)); Vector2(-6.2929, -5.7941));
@ -162,6 +162,27 @@ TEST(JabobianFactor, Hessian_conversion) {
EXPECT(assert_equal(expected, JacobianFactor(hessian), 1e-3)); EXPECT(assert_equal(expected, JacobianFactor(hessian), 1e-3));
} }
/* ************************************************************************* */
TEST(JabobianFactor, Hessian_conversion2) {
JacobianFactor jf(0, (Matrix(3, 3) <<
1, 2, 3,
0, 2, 3,
0, 0, 3).finished(),
Vector3(1, 2, 2));
HessianFactor hessian(jf);
EXPECT(assert_equal(jf, JacobianFactor(hessian), 1e-9));
}
/* ************************************************************************* */
TEST(JabobianFactor, Hessian_conversion3) {
JacobianFactor jf(0, (Matrix(2, 4) <<
1, 2, 3, 0,
0, 3, 2, 1).finished(),
Vector2(1, 2));
HessianFactor hessian(jf);
EXPECT(assert_equal(jf, JacobianFactor(hessian), 1e-9));
}
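The new conversion tests check that a JacobianFactor survives the round trip through HessianFactor (to 1e-9). For the square, upper-triangular A of Hessian_conversion2 the reason is easy to see: the Hessian stores H = A^T A, and recovering a Jacobian amounts to a Cholesky factorization, whose upper-triangular factor with positive diagonal is unique. An Eigen-only sketch of that identity (not GTSAM code):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       0, 2, 3,
       0, 0, 3;                                   // same A as Hessian_conversion2
  const Eigen::Matrix3d H = A.transpose() * A;    // information matrix
  const Eigen::Matrix3d R = H.llt().matrixU();    // Cholesky factor, R^T R = H
  std::cout << (R - A).norm() << std::endl;       // ~0: the factor is recovered
  return 0;
}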
/* ************************************************************************* */ /* ************************************************************************* */
namespace simple_graph { namespace simple_graph {
@ -322,27 +343,30 @@ TEST(JacobianFactor, matrices)
/* ************************************************************************* */ /* ************************************************************************* */
TEST(JacobianFactor, operators ) TEST(JacobianFactor, operators )
{ {
SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2,0.1); const double sigma = 0.1;
SharedDiagonal sigma0_1 = noiseModel::Isotropic::Sigma(2, sigma);
Matrix I = I_2x2; Matrix I = I_2x2;
Vector b = Vector2(0.2,-0.1); Vector b = Vector2(0.2,-0.1);
JacobianFactor lf(1, -I, 2, I, b, sigma0_1); JacobianFactor lf(1, -I, 2, I, b, sigma0_1);
VectorValues c; VectorValues x;
c.insert(1, Vector2(10.,20.)); Vector2 x1(10,20), x2(30,60);
c.insert(2, Vector2(30.,60.)); x.insert(1, x1);
x.insert(2, x2);
// test A*x // test A*x
Vector expectedE = Vector2(200.,400.); Vector expectedE = (x2 - x1)/sigma;
Vector actualE = lf * c; Vector actualE = lf * x;
EXPECT(assert_equal(expectedE, actualE)); EXPECT(assert_equal(expectedE, actualE));
// test A^T * e // test A^T * e
VectorValues expectedX; VectorValues expectedX;
expectedX.insert(1, Vector2(-2000.,-4000.)); const double alpha = 0.5;
expectedX.insert(2, Vector2(2000., 4000.)); expectedX.insert(1, - alpha * expectedE /sigma);
expectedX.insert(2, alpha * expectedE /sigma);
VectorValues actualX = VectorValues::Zero(expectedX); VectorValues actualX = VectorValues::Zero(expectedX);
lf.transposeMultiplyAdd(1.0, actualE, actualX); lf.transposeMultiplyAdd(alpha, actualE, actualX);
EXPECT(assert_equal(expectedX, actualX)); EXPECT(assert_equal(expectedX, actualX));
// test gradient at zero // test gradient at zero

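The rewritten operator test above makes the noise whitening explicit: with an isotropic sigma, operator* returns the whitened error A*x = (x2 - x1)/sigma, and transposeMultiplyAdd(alpha, e, x) accumulates alpha * A^T * e into x. A plain-Eigen check of the same numbers (the whitened A = [-I I]/sigma is spelled out from the factor's definition as an assumption):

#include <Eigen/Dense>
#include <iostream>

int main() {
  const double sigma = 0.1, alpha = 0.5;
  Eigen::Matrix<double, 2, 4> A;        // whitened Jacobian of lf: [-I I] / sigma
  A << -1, 0, 1, 0,
        0, -1, 0, 1;
  A /= sigma;
  Eigen::Vector4d x;
  x << 10, 20, 30, 60;                  // x1 = (10, 20), x2 = (30, 60)
  const Eigen::Vector2d e = A * x;      // (x2 - x1)/sigma = (200, 400)
  const Eigen::Vector4d y = alpha * A.transpose() * e;  // what transposeMultiplyAdd adds
  std::cout << e.transpose() << "\n" << y.transpose() << std::endl;
  return 0;
}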
View File

@ -52,6 +52,11 @@ void PreintegratedImuMeasurements::resetIntegration() {
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
void PreintegratedImuMeasurements::integrateMeasurement( void PreintegratedImuMeasurements::integrateMeasurement(
const Vector3& measuredAcc, const Vector3& measuredOmega, double dt) { const Vector3& measuredAcc, const Vector3& measuredOmega, double dt) {
if (dt <= 0) {
throw std::runtime_error(
"PreintegratedImuMeasurements::integrateMeasurement: dt <=0");
}
// Update preintegrated measurements (also get Jacobian) // Update preintegrated measurements (also get Jacobian)
Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx) Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx)
Matrix93 B, C; Matrix93 B, C;

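Since integrateMeasurement now throws on dt <= 0, callers should validate the time step derived from sensor timestamps before integrating. A small hypothetical helper illustrating the guard (safeDt is not a GTSAM function):

#include <iostream>
#include <stdexcept>

// Hypothetical helper: turn consecutive stamps into a dt the preintegration accepts.
double safeDt(double t_prev, double t_curr) {
  const double dt = t_curr - t_prev;
  if (dt <= 0)
    throw std::runtime_error("non-increasing IMU timestamps (dt <= 0)");
  return dt;
}

int main() {
  try {
    safeDt(0.02, 0.02);  // duplicated stamp: rejected, just like integrateMeasurement
  } catch (const std::runtime_error& e) {
    std::cout << e.what() << std::endl;
  }
  return 0;
}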
View File

@ -49,7 +49,7 @@ PreintegratedImuMeasurements ScenarioRunner::integrate(
NavState ScenarioRunner::predict(const PreintegratedImuMeasurements& pim, NavState ScenarioRunner::predict(const PreintegratedImuMeasurements& pim,
const Bias& estimatedBias) const { const Bias& estimatedBias) const {
const NavState state_i(scenario_->pose(0), scenario_->velocity_n(0)); const NavState state_i(scenario_.pose(0), scenario_.velocity_n(0));
return pim.predict(state_i, estimatedBias); return pim.predict(state_i, estimatedBias);
} }

View File

@ -42,7 +42,7 @@ class ScenarioRunner {
typedef boost::shared_ptr<PreintegratedImuMeasurements::Params> SharedParams; typedef boost::shared_ptr<PreintegratedImuMeasurements::Params> SharedParams;
private: private:
const Scenario* scenario_; const Scenario& scenario_;
const SharedParams p_; const SharedParams p_;
const double imuSampleTime_, sqrt_dt_; const double imuSampleTime_, sqrt_dt_;
const Bias estimatedBias_; const Bias estimatedBias_;
@ -51,7 +51,7 @@ class ScenarioRunner {
mutable Sampler gyroSampler_, accSampler_; mutable Sampler gyroSampler_, accSampler_;
public: public:
ScenarioRunner(const Scenario* scenario, const SharedParams& p, ScenarioRunner(const Scenario& scenario, const SharedParams& p,
double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias()) double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias())
: scenario_(scenario), : scenario_(scenario),
p_(p), p_(p),
@ -68,13 +68,13 @@ class ScenarioRunner {
// A gyro simply measures angular velocity in body frame // A gyro simply measures angular velocity in body frame
Vector3 actualAngularVelocity(double t) const { Vector3 actualAngularVelocity(double t) const {
return scenario_->omega_b(t); return scenario_.omega_b(t);
} }
// An accelerometer measures acceleration in body, but not gravity // An accelerometer measures acceleration in body, but not gravity
Vector3 actualSpecificForce(double t) const { Vector3 actualSpecificForce(double t) const {
Rot3 bRn(scenario_->rotation(t).transpose()); Rot3 bRn(scenario_.rotation(t).transpose());
return scenario_->acceleration_b(t) - bRn * gravity_n(); return scenario_.acceleration_b(t) - bRn * gravity_n();
} }
// versions corrupted by bias and noise // versions corrupted by bias and noise
@ -104,6 +104,15 @@ class ScenarioRunner {
/// Estimate covariance of sampled noise for sanity-check /// Estimate covariance of sampled noise for sanity-check
Matrix6 estimateNoiseCovariance(size_t N = 1000) const; Matrix6 estimateNoiseCovariance(size_t N = 1000) const;
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
/// @name Deprecated
/// @{
ScenarioRunner(const Scenario* scenario, const SharedParams& p,
double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias())
: ScenarioRunner(*scenario, p, imuSampleTime, bias) {}
/// @}
#endif
}; };
} // namespace gtsam } // namespace gtsam

View File

@ -52,7 +52,7 @@ TEST(ScenarioRunner, Spin) {
const ConstantTwistScenario scenario(W, V); const ConstantTwistScenario scenario(W, V);
auto p = defaultParams(); auto p = defaultParams();
ScenarioRunner runner(&scenario, p, kDt); ScenarioRunner runner(scenario, p, kDt);
const double T = 2 * kDt; // seconds const double T = 2 * kDt; // seconds
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
@ -80,7 +80,7 @@ ConstantTwistScenario scenario(Z_3x1, Vector3(v, 0, 0));
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, Forward) { TEST(ScenarioRunner, Forward) {
using namespace forward; using namespace forward;
ScenarioRunner runner(&scenario, defaultParams(), kDt); ScenarioRunner runner(scenario, defaultParams(), kDt);
const double T = 0.1; // seconds const double T = 0.1; // seconds
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
@ -94,7 +94,7 @@ TEST(ScenarioRunner, Forward) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, ForwardWithBias) { TEST(ScenarioRunner, ForwardWithBias) {
using namespace forward; using namespace forward;
ScenarioRunner runner(&scenario, defaultParams(), kDt); ScenarioRunner runner(scenario, defaultParams(), kDt);
const double T = 0.1; // seconds const double T = 0.1; // seconds
auto pim = runner.integrate(T, kNonZeroBias); auto pim = runner.integrate(T, kNonZeroBias);
@ -108,7 +108,7 @@ TEST(ScenarioRunner, Circle) {
const double v = 2, w = 6 * kDegree; const double v = 2, w = 6 * kDegree;
ConstantTwistScenario scenario(Vector3(0, 0, w), Vector3(v, 0, 0)); ConstantTwistScenario scenario(Vector3(0, 0, w), Vector3(v, 0, 0));
ScenarioRunner runner(&scenario, defaultParams(), kDt); ScenarioRunner runner(scenario, defaultParams(), kDt);
const double T = 0.1; // seconds const double T = 0.1; // seconds
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
@ -126,7 +126,7 @@ TEST(ScenarioRunner, Loop) {
const double v = 2, w = 6 * kDegree; const double v = 2, w = 6 * kDegree;
ConstantTwistScenario scenario(Vector3(0, -w, 0), Vector3(v, 0, 0)); ConstantTwistScenario scenario(Vector3(0, -w, 0), Vector3(v, 0, 0));
ScenarioRunner runner(&scenario, defaultParams(), kDt); ScenarioRunner runner(scenario, defaultParams(), kDt);
const double T = 0.1; // seconds const double T = 0.1; // seconds
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
@ -157,7 +157,7 @@ const double T = 3; // seconds
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, Accelerating) { TEST(ScenarioRunner, Accelerating) {
using namespace accelerating; using namespace accelerating;
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -170,7 +170,7 @@ TEST(ScenarioRunner, Accelerating) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, AcceleratingWithBias) { TEST(ScenarioRunner, AcceleratingWithBias) {
using namespace accelerating; using namespace accelerating;
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias); ScenarioRunner runner(scenario, defaultParams(), T / 10, kNonZeroBias);
auto pim = runner.integrate(T, kNonZeroBias); auto pim = runner.integrate(T, kNonZeroBias);
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias); Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
@ -185,7 +185,7 @@ TEST(ScenarioRunner, AcceleratingAndRotating) {
const AcceleratingScenario scenario(nRb, P0, V0, A, W); const AcceleratingScenario scenario(nRb, P0, V0, A, W);
const double T = 10; // seconds const double T = 10; // seconds
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -216,7 +216,7 @@ const double T = 3; // seconds
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, Accelerating2) { TEST(ScenarioRunner, Accelerating2) {
using namespace accelerating2; using namespace accelerating2;
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -229,7 +229,7 @@ TEST(ScenarioRunner, Accelerating2) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, AcceleratingWithBias2) { TEST(ScenarioRunner, AcceleratingWithBias2) {
using namespace accelerating2; using namespace accelerating2;
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias); ScenarioRunner runner(scenario, defaultParams(), T / 10, kNonZeroBias);
auto pim = runner.integrate(T, kNonZeroBias); auto pim = runner.integrate(T, kNonZeroBias);
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias); Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
@ -244,7 +244,7 @@ TEST(ScenarioRunner, AcceleratingAndRotating2) {
const AcceleratingScenario scenario(nRb, P0, V0, A, W); const AcceleratingScenario scenario(nRb, P0, V0, A, W);
const double T = 10; // seconds const double T = 10; // seconds
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -276,7 +276,7 @@ const double T = 3; // seconds
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, Accelerating3) { TEST(ScenarioRunner, Accelerating3) {
using namespace accelerating3; using namespace accelerating3;
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -289,7 +289,7 @@ TEST(ScenarioRunner, Accelerating3) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, AcceleratingWithBias3) { TEST(ScenarioRunner, AcceleratingWithBias3) {
using namespace accelerating3; using namespace accelerating3;
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias); ScenarioRunner runner(scenario, defaultParams(), T / 10, kNonZeroBias);
auto pim = runner.integrate(T, kNonZeroBias); auto pim = runner.integrate(T, kNonZeroBias);
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias); Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
@ -304,7 +304,7 @@ TEST(ScenarioRunner, AcceleratingAndRotating3) {
const AcceleratingScenario scenario(nRb, P0, V0, A, W); const AcceleratingScenario scenario(nRb, P0, V0, A, W);
const double T = 10; // seconds const double T = 10; // seconds
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -337,7 +337,7 @@ const double T = 3; // seconds
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, Accelerating4) { TEST(ScenarioRunner, Accelerating4) {
using namespace accelerating4; using namespace accelerating4;
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
@ -350,7 +350,7 @@ TEST(ScenarioRunner, Accelerating4) {
/* ************************************************************************* */ /* ************************************************************************* */
TEST(ScenarioRunner, AcceleratingWithBias4) { TEST(ScenarioRunner, AcceleratingWithBias4) {
using namespace accelerating4; using namespace accelerating4;
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias); ScenarioRunner runner(scenario, defaultParams(), T / 10, kNonZeroBias);
auto pim = runner.integrate(T, kNonZeroBias); auto pim = runner.integrate(T, kNonZeroBias);
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias); Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
@ -365,7 +365,7 @@ TEST(ScenarioRunner, AcceleratingAndRotating4) {
const AcceleratingScenario scenario(nRb, P0, V0, A, W); const AcceleratingScenario scenario(nRb, P0, V0, A, W);
const double T = 10; // seconds const double T = 10; // seconds
ScenarioRunner runner(&scenario, defaultParams(), T / 10); ScenarioRunner runner(scenario, defaultParams(), T / 10);
auto pim = runner.integrate(T); auto pim = runner.integrate(T);
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9)); EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));

View File

@ -37,11 +37,11 @@ public:
VERBOSE VERBOSE
}; };
double deltaInitial; ///< The initial trust region radius (default: 1.0) double deltaInitial; ///< The initial trust region radius (default: 10.0)
VerbosityDL verbosityDL; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity VerbosityDL verbosityDL; ///< The verbosity level for Dogleg (default: SILENT), see also NonlinearOptimizerParams::verbosity
DoglegParams() : DoglegParams() :
deltaInitial(1.0), verbosityDL(SILENT) {} deltaInitial(10.0), verbosityDL(SILENT) {}
virtual ~DoglegParams() {} virtual ~DoglegParams() {}
@ -105,9 +105,9 @@ public:
/** Virtual destructor */ /** Virtual destructor */
virtual ~DoglegOptimizer() {} virtual ~DoglegOptimizer() {}
/** Perform a single iteration, returning a new NonlinearOptimizer class /**
* containing the updated variable assignments, which may be retrieved with * Perform a single iteration, returning a GaussianFactorGraph corresponding to
* values(). * the linearized factor graph.
*/ */
GaussianFactorGraph::shared_ptr iterate() override; GaussianFactorGraph::shared_ptr iterate() override;

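Besides the documentation fix, note that the default initial trust-region radius changed from 1.0 to 10.0. Code that depended on the old behaviour can set it back explicitly; a short sketch (only the two fields shown here are used, both appear in the header above):

#include <gtsam/nonlinear/DoglegOptimizer.h>

using namespace gtsam;

DoglegParams makeParams() {
  DoglegParams params;                        // deltaInitial now defaults to 10.0
  params.deltaInitial = 1.0;                  // restore the previous default
  params.verbosityDL = DoglegParams::SILENT;
  return params;
}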
View File

@ -70,9 +70,9 @@ public:
/** Virtual destructor */ /** Virtual destructor */
virtual ~GaussNewtonOptimizer() {} virtual ~GaussNewtonOptimizer() {}
/** Perform a single iteration, returning a new NonlinearOptimizer class /**
* containing the updated variable assignments, which may be retrieved with * Perform a single iteration, returning a GaussianFactorGraph corresponding to
* values(). * the linearized factor graph.
*/ */
GaussianFactorGraph::shared_ptr iterate() override; GaussianFactorGraph::shared_ptr iterate() override;

View File

@ -246,8 +246,8 @@ void updateRgProd(const ISAM2::sharedClique& clique, const KeySet& replacedKeys,
clique->conditional()->endParents())); clique->conditional()->endParents()));
// Compute R*g and S*g for this clique // Compute R*g and S*g for this clique
Vector RSgProd = clique->conditional()->get_R() * gR + Vector RSgProd = clique->conditional()->R() * gR +
clique->conditional()->get_S() * gS; clique->conditional()->S() * gS;
// Write into RgProd vector // Write into RgProd vector
DenseIndex vectorPosition = 0; DenseIndex vectorPosition = 0;

View File

@ -96,7 +96,7 @@ bool ISAM2::equals(const ISAM2& other, double tol) const {
} }
/* ************************************************************************* */ /* ************************************************************************* */
KeySet ISAM2::getAffectedFactors(const KeyList& keys) const { FactorIndexSet ISAM2::getAffectedFactors(const KeyList& keys) const {
static const bool debug = false; static const bool debug = false;
if (debug) cout << "Getting affected factors for "; if (debug) cout << "Getting affected factors for ";
if (debug) { if (debug) {
@ -106,15 +106,14 @@ KeySet ISAM2::getAffectedFactors(const KeyList& keys) const {
} }
if (debug) cout << endl; if (debug) cout << endl;
NonlinearFactorGraph allAffected; FactorIndexSet indices;
KeySet indices;
for (const Key key : keys) { for (const Key key : keys) {
const VariableIndex::Factors& factors(variableIndex_[key]); const VariableIndex::Factors& factors(variableIndex_[key]);
indices.insert(factors.begin(), factors.end()); indices.insert(factors.begin(), factors.end());
} }
if (debug) cout << "Affected factors are: "; if (debug) cout << "Affected factors are: ";
if (debug) { if (debug) {
for (const size_t index : indices) { for (const auto index : indices) {
cout << index << " "; cout << index << " ";
} }
} }
@ -132,8 +131,6 @@ GaussianFactorGraph::shared_ptr ISAM2::relinearizeAffectedFactors(
KeySet candidates = getAffectedFactors(affectedKeys); KeySet candidates = getAffectedFactors(affectedKeys);
gttoc(getAffectedFactors); gttoc(getAffectedFactors);
NonlinearFactorGraph nonlinearAffectedFactors;
gttic(affectedKeysSet); gttic(affectedKeysSet);
// for fast lookup below // for fast lookup below
KeySet affectedKeysSet; KeySet affectedKeysSet;
@ -589,7 +586,7 @@ ISAM2Result ISAM2::update(
// Remove the removed factors // Remove the removed factors
NonlinearFactorGraph removeFactors; NonlinearFactorGraph removeFactors;
removeFactors.reserve(removeFactorIndices.size()); removeFactors.reserve(removeFactorIndices.size());
for (size_t index : removeFactorIndices) { for (const auto index : removeFactorIndices) {
removeFactors.push_back(nonlinearFactors_[index]); removeFactors.push_back(nonlinearFactors_[index]);
nonlinearFactors_.remove(index); nonlinearFactors_.remove(index);
if (params_.cacheLinearizedFactors) linearFactors_.remove(index); if (params_.cacheLinearizedFactors) linearFactors_.remove(index);
@ -823,7 +820,7 @@ void ISAM2::marginalizeLeaves(
KeySet leafKeysRemoved; KeySet leafKeysRemoved;
// Keep track of factors that get summarized by removing cliques // Keep track of factors that get summarized by removing cliques
KeySet factorIndicesToRemove; FactorIndexSet factorIndicesToRemove;
// Remove the subtree and throw away the cliques // Remove the subtree and throw away the cliques
auto trackingRemoveSubtree = [&](const sharedClique& subtreeRoot) { auto trackingRemoveSubtree = [&](const sharedClique& subtreeRoot) {
@ -937,8 +934,8 @@ void ISAM2::marginalizeLeaves(
} }
} }
// Create factor graph from factor indices // Create factor graph from factor indices
for (size_t i : factorsFromMarginalizedInClique_step1) { for (const auto index: factorsFromMarginalizedInClique_step1) {
graph.push_back(nonlinearFactors_[i]->linearize(theta_)); graph.push_back(nonlinearFactors_[index]->linearize(theta_));
} }
// Reeliminate the linear graph to get the marginal and discard the // Reeliminate the linear graph to get the marginal and discard the
@ -1011,10 +1008,10 @@ void ISAM2::marginalizeLeaves(
// Remove the factors to remove that have been summarized in the newly-added // Remove the factors to remove that have been summarized in the newly-added
// marginal factors // marginal factors
NonlinearFactorGraph removedFactors; NonlinearFactorGraph removedFactors;
for (size_t i : factorIndicesToRemove) { for (const auto index: factorIndicesToRemove) {
removedFactors.push_back(nonlinearFactors_[i]); removedFactors.push_back(nonlinearFactors_[index]);
nonlinearFactors_.remove(i); nonlinearFactors_.remove(index);
if (params_.cacheLinearizedFactors) linearFactors_.remove(i); if (params_.cacheLinearizedFactors) linearFactors_.remove(index);
} }
variableIndex_.remove(factorIndicesToRemove.begin(), variableIndex_.remove(factorIndicesToRemove.begin(),
factorIndicesToRemove.end(), removedFactors); factorIndicesToRemove.end(), removedFactors);

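getAffectedFactors now returns a FactorIndexSet, keeping factor indices and variable keys as distinct types, and the loops above iterate factor indices with auto instead of size_t. From the outside, the usual way to work with factor indices is unchanged; a sketch of removing factors by index through update() (signature per GTSAM 4's ISAM2, stated here as an assumption):

#include <gtsam/nonlinear/ISAM2.h>

using namespace gtsam;

// Remove previously added factors by index; indices typically come from
// ISAM2Result::newFactorsIndices of an earlier update() call.
void removeByIndex(ISAM2& isam, const FactorIndices& toRemove) {
  const NonlinearFactorGraph noNewFactors;
  const Values noNewValues;
  isam.update(noNewFactors, noNewValues, toRemove);
}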
View File

@ -292,7 +292,7 @@ class GTSAM_EXPORT ISAM2 : public BayesTree<ISAM2Clique> {
*/ */
void expmapMasked(const KeySet& mask); void expmapMasked(const KeySet& mask);
FastSet<Key> getAffectedFactors(const FastList<Key>& keys) const; FactorIndexSet getAffectedFactors(const FastList<Key>& keys) const;
GaussianFactorGraph::shared_ptr relinearizeAffectedFactors( GaussianFactorGraph::shared_ptr relinearizeAffectedFactors(
const FastList<Key>& affectedKeys, const KeySet& relinKeys) const; const FastList<Key>& affectedKeys, const KeySet& relinKeys) const;
GaussianFactorGraph getCachedBoundaryFactors(const Cliques& orphans); GaussianFactorGraph getCachedBoundaryFactors(const Cliques& orphans);

View File

@ -37,9 +37,9 @@ void ISAM2Clique::setEliminationResult(
gradientContribution_.resize(conditional_->cols() - 1); gradientContribution_.resize(conditional_->cols() - 1);
// Rewrite -(R * P')'*d as -(d' * R * P')' for computational speed // Rewrite -(R * P')'*d as -(d' * R * P')' for computational speed
// reasons // reasons
gradientContribution_ << -conditional_->get_R().transpose() * gradientContribution_ << -conditional_->R().transpose() *
conditional_->get_d(), conditional_->d(),
-conditional_->get_S().transpose() * conditional_->get_d(); -conditional_->S().transpose() * conditional_->d();
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -141,8 +141,8 @@ void ISAM2Clique::fastBackSubstitute(VectorValues* delta) const {
// This is because Eigen (as of 3.3) no longer evaluates S * xS into // This is because Eigen (as of 3.3) no longer evaluates S * xS into
// a temporary, and the operation trashes values in xS. // a temporary, and the operation trashes values in xS.
// See: http://eigen.tuxfamily.org/index.php?title=3.3 // See: http://eigen.tuxfamily.org/index.php?title=3.3
const Vector rhs = c.getb() - c.get_S() * xS; const Vector rhs = c.getb() - c.S() * xS;
const Vector solution = c.get_R().triangularView<Eigen::Upper>().solve(rhs); const Vector solution = c.R().triangularView<Eigen::Upper>().solve(rhs);
// Check for indeterminant solution // Check for indeterminant solution
if (solution.hasNaN()) if (solution.hasNaN())
@ -284,7 +284,7 @@ size_t optimizeWildfireNonRecursive(const ISAM2Clique::shared_ptr& root,
/* ************************************************************************* */ /* ************************************************************************* */
void ISAM2Clique::nnz_internal(size_t* result) const { void ISAM2Clique::nnz_internal(size_t* result) const {
size_t dimR = conditional_->rows(); size_t dimR = conditional_->rows();
size_t dimSep = conditional_->get_S().cols(); size_t dimSep = conditional_->S().cols();
*result += ((dimR + 1) * dimR) / 2 + dimSep * dimR; *result += ((dimR + 1) * dimR) / 2 + dimSep * dimR;
// traverse the children // traverse the children
for (const auto& child : children) { for (const auto& child : children) {

View File

@ -31,8 +31,6 @@
namespace gtsam { namespace gtsam {
typedef FastVector<size_t> FactorIndices;
/** /**
* @addtogroup ISAM2 * @addtogroup ISAM2
* This struct is returned from ISAM2::update() and contains information about * This struct is returned from ISAM2::update() and contains information about

View File

@ -94,9 +94,9 @@ public:
/// @name Advanced interface /// @name Advanced interface
/// @{ /// @{
/** Perform a single iteration, returning a new NonlinearOptimizer class /**
* containing the updated variable assignments, which may be retrieved with * Perform a single iteration, returning a GaussianFactorGraph corresponding to
* values(). * the linearized factor graph.
*/ */
GaussianFactorGraph::shared_ptr iterate() override; GaussianFactorGraph::shared_ptr iterate() override;

View File

@ -70,7 +70,16 @@ public:
virtual ~NonlinearConjugateGradientOptimizer() { virtual ~NonlinearConjugateGradientOptimizer() {
} }
/**
* Perform a single iteration, returning a GaussianFactorGraph corresponding to
* the linearized factor graph.
*/
GaussianFactorGraph::shared_ptr iterate() override; GaussianFactorGraph::shared_ptr iterate() override;
/**
* Optimize for the maximum-likelihood estimate, returning the optimized
* variable assignments.
*/
const Values& optimize() override; const Values& optimize() override;
}; };
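The documentation now distinguishes the two entry points of the nonlinear optimizers: iterate() performs one step and returns the linearized GaussianFactorGraph, while optimize() runs to convergence and returns the Values. A minimal call sketch for the conjugate-gradient optimizer (default parameters assumed):

#include <gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h>

using namespace gtsam;

Values runCG(const NonlinearFactorGraph& graph, const Values& initial) {
  NonlinearConjugateGradientOptimizer optimizer(graph, initial);
  return optimizer.optimize();   // iterate() would run just a single step instead
}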

Some files were not shown because too many files have changed in this diff