Merge branch 'develop' into feature/NoiseModelFactorN

#1102 correctly removed "GTSAM_EXPORT" from NoiseModelFactorX (release/4.3a0)

commit 8ae8c7ac52
@@ -71,6 +71,7 @@ function configure()
    -DGTSAM_USE_SYSTEM_EIGEN=${GTSAM_USE_SYSTEM_EIGEN:-OFF} \
    -DGTSAM_USE_SYSTEM_METIS=${GTSAM_USE_SYSTEM_METIS:-OFF} \
    -DGTSAM_BUILD_WITH_MARCH_NATIVE=OFF \
    -DGTSAM_SINGLE_TEST_EXE=ON \
    -DBOOST_ROOT=$BOOST_ROOT \
    -DBoost_NO_SYSTEM_PATHS=ON \
    -DBoost_ARCHITECTURE=-x64

@@ -95,7 +96,11 @@ function build ()
  configure

  if [ "$(uname)" == "Linux" ]; then
    make -j$(nproc)
    if (($(nproc) > 2)); then
      make -j$(nproc)
    else
      make -j2
    fi
  elif [ "$(uname)" == "Darwin" ]; then
    make -j$(sysctl -n hw.physicalcpu)
  fi

@@ -113,7 +118,11 @@ function test ()
  # Actual testing
  if [ "$(uname)" == "Linux" ]; then
    make -j$(nproc) check
    if (($(nproc) > 2)); then
      make -j$(nproc) check
    else
      make -j2 check
    fi
  elif [ "$(uname)" == "Darwin" ]; then
    make -j$(sysctl -n hw.physicalcpu) check
  fi
@@ -48,7 +48,9 @@ jobs:
      - name: Install Dependencies
        shell: powershell
        run: |
          Invoke-Expression (New-Object System.Net.WebClient).DownloadString('https://get.scoop.sh')
          iwr -useb get.scoop.sh -outfile 'install_scoop.ps1'
          .\install_scoop.ps1 -RunAsAdmin

          scoop install cmake --global # So we don't get issues with CMP0074 policy
          scoop install ninja --global

@@ -106,6 +108,21 @@ jobs:
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target gtsam_unstable
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target wrap

          # Run GTSAM tests
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.basis
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.discrete
          #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.geometry
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.inference
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.linear
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.navigation
          #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.nonlinear
          #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sam
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.sfm
          #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.slam
          cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.symbolic

          # Run GTSAM_UNSTABLE tests
          #cmake --build build -j 4 --config ${{ matrix.build_type }} --target check.base_unstable
@@ -17,3 +17,4 @@
# for QtCreator:
CMakeLists.txt.user*
xcode/
/Dockerfile
@@ -1,4 +1,3 @@
project(GTSAM CXX C)
cmake_minimum_required(VERSION 3.0)

# new feature to Cmake Version > 2.8.12

@@ -11,7 +10,7 @@ endif()
set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a4")
set (GTSAM_PRERELEASE_VERSION "a6")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")

if (${GTSAM_VERSION_PATCH} EQUAL 0)

@@ -19,6 +18,11 @@ if (${GTSAM_VERSION_PATCH} EQUAL 0)
else()
  set (GTSAM_VERSION_STRING "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}${GTSAM_PRERELEASE_VERSION}")
endif()

project(GTSAM
        LANGUAGES CXX C
        VERSION "${GTSAM_VERSION_MAJOR}.${GTSAM_VERSION_MINOR}.${GTSAM_VERSION_PATCH}")

message(STATUS "GTSAM Version: ${GTSAM_VERSION_STRING}")

set (CMAKE_PROJECT_VERSION_MAJOR ${GTSAM_VERSION_MAJOR})
@@ -15,7 +15,7 @@ For example:
```cpp
class GTSAM_EXPORT MyClass { ... };

GTSAM_EXPORT myFunction();
GTSAM_EXPORT return_type myFunction();
```

More details [here](Using-GTSAM-EXPORT.md).
@@ -13,7 +13,7 @@ $ make install
## Important Installation Notes

1. GTSAM requires the following libraries to be installed on your system:
    - BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes).
    - BOOST version 1.65 or greater (install through Linux repositories or MacPorts). Please see [Boost Notes](#boost-notes) for version recommendations based on your compiler.

    - Cmake version 3.0 or higher
    - Support for XCode 4.3 command line tools on Mac requires CMake 2.8.8 or higher

@@ -72,7 +72,7 @@ execute commands as follows for an out-of-source build:
Versions of Boost prior to 1.65 have a known bug that prevents proper "deep" serialization of objects, which means that objects encapsulated inside other objects don't get serialized.
This is particularly seen when using `clang` as the C++ compiler.

For this reason we require Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.
For this reason we recommend Boost>=1.65, and recommend installing it through alternative channels when it is not available through your operating system's primary package manager.

## Known Issues
@@ -8,6 +8,7 @@ To create a DLL in windows, the `GTSAM_EXPORT` keyword has been created and need
* At least one of the functions inside that class is declared in a .cpp file and not just the .h file.
* You can `GTSAM_EXPORT` any class it inherits from as well. (Note that this implictly requires the class does not derive from a "header-only" class. Note that Eigen is a "header-only" library, so if your class derives from Eigen, _do not_ use `GTSAM_EXPORT` in the class definition!)
3. If you have defined a class using `GTSAM_EXPORT`, do not use `GTSAM_EXPORT` in any of its individual function declarations. (Note that you _can_ put `GTSAM_EXPORT` in the definition of individual functions within a class as long as you don't put `GTSAM_EXPORT` in the class definition.)
4. For template specializations, you need to add `GTSAM_EXPORT` to each individual specialization.

## When is GTSAM_EXPORT being used incorrectly
Unfortunately, using `GTSAM_EXPORT` incorrectly often does not cause a compiler or linker error in the library that is being compiled, but only when you try to use that DLL in a different library. For example, an error in `gtsam/base` will often show up when compiling the `check_base_program` or the MATLAB wrapper, but not when compiling/linking gtsam itself. The most common errors will say something like:
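To make the export rules above concrete, a hypothetical header might look as follows; the class, function, and trait names are invented for illustration and are not GTSAM API (a sketch, assuming the `GTSAM_EXPORT` macro is available, e.g. via `gtsam/dllexport.h`):

```cpp
// Hypothetical example only: names below are made up to illustrate the rules.
#include <gtsam/dllexport.h>  // assumed location of the GTSAM_EXPORT macro

// Rules 2/3: at least one member is defined in a .cpp file, so export the whole
// class, and do NOT repeat GTSAM_EXPORT on its individual member declarations.
class GTSAM_EXPORT MyFactor {
 public:
  double error() const;  // defined in MyFactor.cpp
};

// Free function declared here but defined in a .cpp file: export the
// declaration, keeping the return type between the macro and the name.
GTSAM_EXPORT double computeSomething(const MyFactor& factor);

// Rule 4: each template specialization must be exported individually.
template <typename T>
struct MyTraits;

template <>
struct GTSAM_EXPORT MyTraits<MyFactor> {
  static constexpr int dimension = 3;
};
```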
@@ -93,6 +93,10 @@ if(MSVC)
    /wd4267 # warning C4267: 'initializing': conversion from 'size_t' to 'int', possible loss of data
  )

  add_compile_options(/wd4005)
  add_compile_options(/wd4101)
  add_compile_options(/wd4834)

endif()

# Other (non-preprocessor macros) compiler flags:

@@ -187,7 +191,7 @@ endif()

if (NOT MSVC)
  option(GTSAM_BUILD_WITH_MARCH_NATIVE "Enable/Disable building with all instructions supported by native architecture (binary may not be portable!)" ON)
  if(GTSAM_BUILD_WITH_MARCH_NATIVE)
  if(GTSAM_BUILD_WITH_MARCH_NATIVE AND (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64"))
    # Add as public flag so all dependant projects also use it, as required
    # by Eigen to avid crashes due to SIMD vectorization:
    list_append_cache(GTSAM_COMPILE_OPTIONS_PUBLIC "-march=native")
@@ -21,7 +21,12 @@ if(GTSAM_USE_SYSTEM_METIS)
    mark_as_advanced(METIS_LIBRARY)

    add_library(metis-gtsam-if INTERFACE)
    target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR})
    target_include_directories(metis-gtsam-if BEFORE INTERFACE ${METIS_INCLUDE_DIR}
      # gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
      # via extern "C"
      $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
      $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
    )
    target_link_libraries(metis-gtsam-if INTERFACE ${METIS_LIBRARY})
  endif()
else()

@@ -30,10 +35,12 @@ else()
  add_subdirectory(${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis)

  target_include_directories(metis-gtsam BEFORE PUBLIC
    $<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/include>
    # gtsam_unstable/partition/FindSeparator-inl.h uses internal metislib.h API
    # via extern "C"
    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/libmetis>
    $<BUILD_INTERFACE:${GTSAM_SOURCE_DIR}/gtsam/3rdparty/metis/GKlib>
    $<INSTALL_INTERFACE:include/gtsam/3rdparty/metis/>
  )

  add_library(metis-gtsam-if INTERFACE)
@@ -1,7 +1,7 @@
// add unary measurement factors, like GPS, on all three poses
noiseModel::Diagonal::shared_ptr unaryNoise =
auto unaryNoise =
  noiseModel::Diagonal::Sigmas(Vector2(0.1, 0.1)); // 10cm std on x,y
graph.add(boost::make_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise));
graph.add(boost::make_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise));
graph.add(boost::make_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise));
graph.emplace_shared<UnaryFactor>(1, 0.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(2, 2.0, 0.0, unaryNoise);
graph.emplace_shared<UnaryFactor>(3, 4.0, 0.0, unaryNoise);
@@ -1,13 +1,12 @@
class UnaryFactor: public NoiseModelFactor1<Pose2> {
  double mx_, my_; ///< X and Y measurements

 public:
  UnaryFactor(Key j, double x, double y, const SharedNoiseModel& model):
    NoiseModelFactor1<Pose2>(model, j), mx_(x), my_(y) {}

  Vector evaluateError(const Pose2& q,
                       boost::optional<Matrix&> H = boost::none) const
  {
  Vector evaluateError(const Pose2& q,
                       boost::optional<Matrix&> H = boost::none) const override {
    const Rot2& R = q.rotation();
    if (H) (*H) = (gtsam::Matrix(2, 3) <<
        R.c(), -R.s(), 0.0,
@@ -3,13 +3,11 @@ NonlinearFactorGraph graph;

// Add a Gaussian prior on pose x_1
Pose2 priorMean(0.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr priorNoise =
  noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.addPrior(1, priorMean, priorNoise);
auto priorNoise = noiseModel::Diagonal::Sigmas(Vector3(0.3, 0.3, 0.1));
graph.add(PriorFactor<Pose2>(1, priorMean, priorNoise));

// Add two odometry factors
Pose2 odometry(2.0, 0.0, 0.0);
noiseModel::Diagonal::shared_ptr odometryNoise =
  noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
auto odometryNoise = noiseModel::Diagonal::Sigmas(Vector3(0.2, 0.2, 0.1));
graph.add(BetweenFactor<Pose2>(1, 2, odometry, odometryNoise));
graph.add(BetweenFactor<Pose2>(2, 3, odometry, odometryNoise));
@@ -1,11 +1,14 @@
Factor Graph:
size: 3
factor 0: PriorFactor on 1
  prior mean: (0, 0, 0)

Factor 0: PriorFactor on 1
  prior mean: (0, 0, 0)
  noise model: diagonal sigmas [0.3; 0.3; 0.1];
factor 1: BetweenFactor(1,2)
  measured: (2, 0, 0)
  noise model: diagonal sigmas [0.2; 0.2; 0.1];
factor 2: BetweenFactor(2,3)
  measured: (2, 0, 0)

Factor 1: BetweenFactor(1,2)
  measured: (2, 0, 0)
  noise model: diagonal sigmas [0.2; 0.2; 0.1];

Factor 2: BetweenFactor(2,3)
  measured: (2, 0, 0)
  noise model: diagonal sigmas [0.2; 0.2; 0.1];
@@ -1,11 +1,23 @@
Initial Estimate:

Values with 3 values:
Value 1: (0.5, 0, 0.2)
Value 2: (2.3, 0.1, -0.2)
Value 3: (4.1, 0.1, 0.1)
Value 1: (gtsam::Pose2)
(0.5, 0, 0.2)

Value 2: (gtsam::Pose2)
(2.3, 0.1, -0.2)

Value 3: (gtsam::Pose2)
(4.1, 0.1, 0.1)

Final Result:

Values with 3 values:
Value 1: (-1.8e-16, 8.7e-18, -9.1e-19)
Value 2: (2, 7.4e-18, -2.5e-18)
Value 3: (4, -1.8e-18, -3.1e-18)
Value 1: (gtsam::Pose2)
(7.5e-16, -5.3e-16, -1.8e-16)

Value 2: (gtsam::Pose2)
(2, -1.1e-15, -2.5e-16)

Value 3: (gtsam::Pose2)
(4, -1.7e-15, -2.5e-16)
@@ -1,12 +1,12 @@
x1 covariance:
0.09 1.1e-47 5.7e-33
1.1e-47 0.09 1.9e-17
5.7e-33 1.9e-17 0.01
0.09 1.7e-33 2.8e-33
1.7e-33 0.09 2.6e-17
2.8e-33 2.6e-17 0.01
x2 covariance:
0.13 4.7e-18 2.4e-18
4.7e-18 0.17 0.02
2.4e-18 0.02 0.02
0.13 1.2e-18 6.1e-19
1.2e-18 0.17 0.02
6.1e-19 0.02 0.02
x3 covariance:
0.17 2.7e-17 8.4e-18
2.7e-17 0.37 0.06
8.4e-18 0.06 0.03
0.17 8.6e-18 2.7e-18
8.6e-18 0.37 0.06
2.7e-18 0.06 0.03
254 doc/gtsam.lyx
@@ -134,14 +134,10 @@ A Hands-on Introduction

\begin_layout Author
Frank Dellaert
\begin_inset Newline newline
\end_inset

Technical Report number GT-RIM-CP&R-2014-XXX
\end_layout

\begin_layout Date
September 2014
Updated Last March 2022
\end_layout

\begin_layout Standard

@@ -154,18 +150,14 @@ filename "common_macros.tex"

\end_layout

\begin_layout Section*
Overview
\end_layout

\begin_layout Standard
\begin_layout Abstract
In this document I provide a hands-on introduction to both factor graphs
and GTSAM.
This is an updated version from the 2012 TR that is tailored to our GTSAM
3.0 library and beyond.
4.0 library and beyond.
\end_layout

\begin_layout Standard
\begin_layout Abstract

\series bold
Factor graphs

@@ -206,7 +198,7 @@ ts or prior knowledge.
robotics and vision.
\end_layout

\begin_layout Standard
\begin_layout Abstract
The GTSAM toolbox (GTSAM stands for
\begin_inset Quotes eld
\end_inset

@@ -221,11 +213,13 @@ Georgia Tech Smoothing and Mapping
It provides state of the art solutions to the SLAM and SFM problems, but
can also be used to model and solve both simpler and more complex estimation
problems.
It also provides a MATLAB interface which allows for rapid prototype developmen
t, visualization, and user interaction.
It also provides MATLAB and Python wrappers which allow for rapid prototype
development, visualization, and user interaction.
In addition, it is easy to use in Jupyter notebooks and/or Google's coLaborator
y.
\end_layout

\begin_layout Standard
\begin_layout Abstract
GTSAM exploits sparsity to be computationally efficient.
Typically measurements only provide information on the relationship between
a handful of variables, and hence the resulting factor graph will be sparsely

@@ -236,14 +230,17 @@ l complexity.
GTSAM provides iterative methods that are quite efficient regardless.
\end_layout

\begin_layout Standard
You can download the latest version of GTSAM at
\begin_layout Abstract
You can download the latest version of GTSAM from GitHub at
\end_layout

\begin_layout Abstract
\begin_inset Flex URL
status open

\begin_layout Plain Layout

http://tinyurl.com/gtsam
https://github.com/borglab/gtsam
\end_layout

\end_inset

@@ -741,7 +738,7 @@ noindent
\begin_inset Formula $f_{0}(x_{1})$
\end_inset

on lines 5-8 as an instance of
on lines 5-7 as an instance of
\series bold
\emph on
PriorFactor<T>

@@ -773,7 +770,7 @@ Pose2,
noiseModel::Diagonal
\series default
\emph default
by specifying three standard deviations in line 7, respectively 30 cm.
by specifying three standard deviations in line 6, respectively 30 cm.
\begin_inset space ~
\end_inset

@@ -795,7 +792,7 @@ Similarly, odometry measurements are specified as
Pose2
\series default
\emph default
on line 11, with a slightly different noise model defined on line 12-13.
on line 10, with a slightly different noise model defined on line 11.
We then add the two factors
\begin_inset Formula $f_{1}(x_{1},x_{2};o_{1})$
\end_inset

@@ -804,7 +801,7 @@ Pose2
\begin_inset Formula $f_{2}(x_{2},x_{3};o_{2})$
\end_inset

on lines 14-15, as instances of yet another templated class,
on lines 12-13, as instances of yet another templated class,
\series bold
\emph on
BetweenFactor<T>

@@ -875,7 +872,7 @@ smoothing and mapping

.
Later in this document we will talk about how we can also use GTSAM to
do filtering (which you often do
do filtering (which often you do
\emph on
not
\emph default

@@ -928,7 +925,11 @@ Values
\begin_layout Standard
The latter point is often a point of confusion with beginning users of GTSAM.
It helps to remember that when designing GTSAM we took a functional approach
of classes corresponding to mathematical objects, which are usually immutable.
of classes corresponding to mathematical objects, which are usually
\emph on
immutable
\emph default
.
You should think of a factor graph as a
\emph on
function

@@ -1027,7 +1028,7 @@ NonlinearFactorGraph
\end_layout

\begin_layout Standard
The relevant output from running the example is as follows:
The relevant output from running the example is as follows:
\family typewriter
\size small

@@ -1363,14 +1364,18 @@ where
\end_inset

is the measurement,
\begin_inset Formula $q$
\begin_inset Formula $q\in SE(2)$
\end_inset

is the unknown variable,
\begin_inset Formula $h(q)$
\end_inset

is a (possibly nonlinear) measurement function, and
is a
\series bold
measurement function
\series default
, and
\begin_inset Formula $\Sigma$
\end_inset

@@ -1546,7 +1551,7 @@ E(q)\define h(q)-m

\end_inset

which is done on line 12.
which is done on line 14.
Importantly, because we want to use this factor for nonlinear optimization
(see e.g.,
\begin_inset CommandInset citation

@@ -1599,11 +1604,11 @@ q_{y}
\begin_inset Formula $q=\left(q_{x},q_{y},q_{\theta}\right)$
\end_inset

, yields the following simple
, yields the following
\begin_inset Formula $2\times3$
\end_inset

matrix in tangent space which is the same the as the rotation matrix:
matrix:
\end_layout

\begin_layout Standard

@@ -1618,6 +1623,171 @@ H=\left[\begin{array}{ccc}
\end_inset


\end_layout

\begin_layout Paragraph*
Important Note
\end_layout

\begin_layout Standard
Many of our users, when attempting to create a custom factor, are initially
surprised at the Jacobian matrix not agreeing with their intuition.
For example, above you might simply expect a
\begin_inset Formula $2\times3$
\end_inset

identity matrix.
This
\emph on
would
\emph default
be true for variables belonging to a vector space.
However, in GTSAM we define the Jacobian more generally to be the matrix

\begin_inset Formula $H$
\end_inset

such that
\begin_inset Formula
\[
h(q\exp\hat{\xi})\approx h(q)+H\xi
\]

\end_inset

where
\begin_inset Formula $\xi=(\delta x,\delta y,\delta\theta)$
\end_inset

is an incremental update and
\begin_inset Formula $\exp\hat{\xi}$
\end_inset

is the
\series bold
exponential map
\series default
for the variable we want to update.
In this case
\begin_inset Formula $q\in SE(2)$
\end_inset

, where
\begin_inset Formula $SE(2)$
\end_inset

is the group of 2D rigid transforms, implemented by
\series bold
\emph on
Pose2
\emph default
.

\series default
The exponential map for
\begin_inset Formula $SE(2)$
\end_inset

can be approximated to first order as
\begin_inset Formula
\[
\exp\hat{\xi}\approx\left[\begin{array}{ccc}
1 & -\delta\theta & \delta x\\
\delta\theta & 1 & \delta y\\
0 & 0 & 1
\end{array}\right]
\]

\end_inset

when using the
\begin_inset Formula $3\times3$
\end_inset

matrix representation for 2D poses, and hence
\begin_inset Formula
\[
h(qe^{\hat{\xi}})\approx h\left(\left[\begin{array}{ccc}
\cos(q_{\theta}) & -\sin(q_{\theta}) & q_{x}\\
\sin(q_{\theta}) & \cos(q_{\theta}) & q_{y}\\
0 & 0 & 1
\end{array}\right]\left[\begin{array}{ccc}
1 & -\delta\theta & \delta x\\
\delta\theta & 1 & \delta y\\
0 & 0 & 1
\end{array}\right]\right)=\left[\begin{array}{c}
q_{x}+\cos(q_{\theta})\delta x-\sin(q_{\theta})\delta y\\
q_{y}+\sin(q_{\theta})\delta x+\cos(q_{\theta})\delta y
\end{array}\right]
\]

\end_inset

which then explains the Jacobian
\begin_inset Formula $H$
\end_inset

.
\end_layout

\begin_layout Standard
Lie groups are very relevant in the robotics context, and you can read more
here:
\end_layout

\begin_layout Itemize
\begin_inset Flex URL
status open

\begin_layout Plain Layout

https://github.com/borglab/gtsam/blob/develop/doc/LieGroups.pdf
\end_layout

\end_inset


\end_layout

\begin_layout Itemize
\begin_inset Flex URL
status open

\begin_layout Plain Layout

https://github.com/borglab/gtsam/blob/develop/doc/math.pdf
\end_layout

\end_inset


\end_layout

\begin_layout Standard
In some cases you want to go even beyond Lie groups to a looser concept,

\series bold
manifolds
\series default
, because not all unknown variables behave like a group, e.g., the space of
3D planes, 2D lines, directions in space, etc.
For manifolds we do not always have an exponential map, but we have a retractio
n that plays the same role.
Some of this is explained here:
\end_layout

\begin_layout Itemize
\begin_inset Flex URL
status open

\begin_layout Plain Layout

https://gtsam.org/notes/GTSAM-Concepts.html
\end_layout

\end_inset


\end_layout

\begin_layout Subsection

@@ -1680,13 +1850,13 @@ UnaryFactor
\series default
\emph default
instances, and add them to graph.
GTSAM uses shared pointers to refer to factors in factor graphs, and
GTSAM uses shared pointers to refer to factors, and
\series bold
\emph on
boost::make_shared
emplace_shared
\series default
\emph default
is a convenience function to simultaneously construct a class and create
is a convenience method to simultaneously construct a class and create
a
\series bold
\emph on

@@ -1694,22 +1864,6 @@ shared_ptr
\series default
\emph default
to it.

\begin_inset Note Note
status collapsed

\begin_layout Plain Layout
and on lines 4-6 we add three newly created
\series bold
\emph on
UnaryFactor
\series default
\emph default
instances to the graph.
\end_layout

\end_inset

We obtain the factor graph from Figure
\begin_inset CommandInset ref
LatexCommand vref
BIN doc/gtsam.pdf
Binary file not shown.
@@ -7,7 +7,7 @@
<count>32</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>
@@ -7,7 +7,7 @@
<count>2</count>
<item_version>1</item_version>
<item class_id="3" tracking_level="0" version="1">
<px class_id="4" class_name="JacobianFactor" tracking_level="1" version="0" object_id="_0">
<px class_id="4" class_name="gtsam::JacobianFactor" tracking_level="1" version="0" object_id="_0">
<Base class_id="5" tracking_level="0" version="0">
<Base class_id="6" tracking_level="0" version="0">
<keys_>
@@ -10,62 +10,81 @@
 * -------------------------------------------------------------------------- */

/**
 * @file RangeISAMExample_plaza1.cpp
 * @file RangeISAMExample_plaza2.cpp
 * @brief A 2D Range SLAM example
 * @date June 20, 2013
 * @author FRank Dellaert
 * @author Frank Dellaert
 */

// Both relative poses and recovered trajectory poses will be stored as Pose2 objects
// Both relative poses and recovered trajectory poses will be stored as Pose2.
#include <gtsam/geometry/Pose2.h>
using gtsam::Pose2;

// Each variable in the system (poses and landmarks) must be identified with a unique key.
// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
// Here we will use Symbols
// gtsam::Vectors are dynamic Eigen vectors, Vector3 is statically sized.
#include <gtsam/base/Vector.h>
using gtsam::Vector;
using gtsam::Vector3;

// Unknown landmarks are of type Point2 (which is just a 2D Eigen vector).
#include <gtsam/geometry/Point2.h>
using gtsam::Point2;

// Each variable in the system (poses and landmarks) must be identified with a
// unique key. We can either use simple integer keys (1, 2, 3, ...) or symbols
// (X1, X2, L1). Here we will use Symbols.
#include <gtsam/inference/Symbol.h>
using gtsam::Symbol;

// We want to use iSAM2 to solve the range-SLAM problem incrementally
// We want to use iSAM2 to solve the range-SLAM problem incrementally.
#include <gtsam/nonlinear/ISAM2.h>

// iSAM2 requires as input a set set of new factors to be added stored in a factor graph,
// and initial guesses for any new variables used in the added factors
// iSAM2 requires as input a set set of new factors to be added stored in a
// factor graph, and initial guesses for any new variables in the added factors.
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

// We will use a non-liear solver to batch-inituialize from the first 150 frames
// We will use a non-linear solver to batch-initialize from the first 150 frames
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>

// In GTSAM, measurement functions are represented as 'factors'. Several common factors
// have been provided with the library for solving robotics SLAM problems.
#include <gtsam/slam/BetweenFactor.h>
// Measurement functions are represented as 'factors'. Several common factors
// have been provided with the library for solving robotics SLAM problems:
#include <gtsam/sam/RangeFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/dataset.h>

// Standard headers, added last, so we know headers above work on their own
// Timing, with functions below, provides nice facilities to benchmark.
#include <gtsam/base/timing.h>
using gtsam::tictoc_print_;

// Standard headers, added last, so we know headers above work on their own.
#include <fstream>
#include <iostream>
#include <random>
#include <set>
#include <string>
#include <utility>
#include <vector>

using namespace std;
using namespace gtsam;
namespace NM = gtsam::noiseModel;

// data available at http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/
// Datafile format (from http://www.frc.ri.cmu.edu/projects/emergencyresponse/RangeData/log.html)
// Data is second UWB ranging dataset, B2 or "plaza 2", from
// "Navigating with Ranging Radios: Five Data Sets with Ground Truth"
// by Joseph Djugash, Bradley Hamner, and Stephan Roth
// https://www.ri.cmu.edu/pub_files/2009/9/Final_5datasetsRangingRadios.pdf

// load the odometry
// DR: Odometry Input (delta distance traveled and delta heading change)
// Time (sec) Delta Dist. Trav. (m) Delta Heading (rad)
typedef pair<double, Pose2> TimedOdometry;
list<TimedOdometry> readOdometry() {
  list<TimedOdometry> odometryList;
  string data_file = findExampleDataFile("Plaza2_DR.txt");
  ifstream is(data_file.c_str());
// Time (sec) Delta Distance Traveled (m) Delta Heading (rad)
using TimedOdometry = std::pair<double, Pose2>;
std::list<TimedOdometry> readOdometry() {
  std::list<TimedOdometry> odometryList;
  std::string data_file = gtsam::findExampleDataFile("Plaza2_DR.txt");
  std::ifstream is(data_file.c_str());

  while (is) {
    double t, distance_traveled, delta_heading;
    is >> t >> distance_traveled >> delta_heading;
    odometryList.push_back(
        TimedOdometry(t, Pose2(distance_traveled, 0, delta_heading)));
    odometryList.emplace_back(t, Pose2(distance_traveled, 0, delta_heading));
  }
  is.clear(); /* clears the end-of-file and error flags */
  return odometryList;

@@ -73,90 +92,85 @@ list<TimedOdometry> readOdometry() {

// load the ranges from TD
// Time (sec) Sender / Antenna ID Receiver Node ID Range (m)
typedef boost::tuple<double, size_t, double> RangeTriple;
vector<RangeTriple> readTriples() {
  vector<RangeTriple> triples;
  string data_file = findExampleDataFile("Plaza2_TD.txt");
  ifstream is(data_file.c_str());
using RangeTriple = boost::tuple<double, size_t, double>;
std::vector<RangeTriple> readTriples() {
  std::vector<RangeTriple> triples;
  std::string data_file = gtsam::findExampleDataFile("Plaza2_TD.txt");
  std::ifstream is(data_file.c_str());

  while (is) {
    double t, sender, range;
    size_t receiver;
    double t, range, sender, receiver;
    is >> t >> sender >> receiver >> range;
    triples.push_back(RangeTriple(t, receiver, range));
    triples.emplace_back(t, receiver, range);
  }
  is.clear(); /* clears the end-of-file and error flags */
  return triples;
}

// main
int main (int argc, char** argv) {

int main(int argc, char** argv) {
  // load Plaza2 data
  list<TimedOdometry> odometry = readOdometry();
  // size_t M = odometry.size();
  std::list<TimedOdometry> odometry = readOdometry();
  size_t M = odometry.size();
  std::cout << "Read " << M << " odometry entries." << std::endl;

  vector<RangeTriple> triples = readTriples();
  std::vector<RangeTriple> triples = readTriples();
  size_t K = triples.size();
  std::cout << "Read " << K << " range triples." << std::endl;

  // parameters
  size_t minK = 150; // minimum number of range measurements to process initially
  size_t incK = 25; // minimum number of range measurements to process after
  bool groundTruth = false;
  size_t minK =
      150;  // minimum number of range measurements to process initially
  size_t incK = 25;  // minimum number of range measurements to process after
  bool robust = true;

  // Set Noise parameters
  Vector priorSigmas = Vector3(1,1,M_PI);
  Vector priorSigmas = Vector3(1, 1, M_PI);
  Vector odoSigmas = Vector3(0.05, 0.01, 0.1);
  double sigmaR = 100; // range standard deviation
  const NM::Base::shared_ptr // all same type
      priorNoise = NM::Diagonal::Sigmas(priorSigmas), //prior
      odoNoise = NM::Diagonal::Sigmas(odoSigmas), // odometry
      gaussian = NM::Isotropic::Sigma(1, sigmaR), // non-robust
      tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15), gaussian), //robust
      rangeNoise = robust ? tukey : gaussian;
  double sigmaR = 100;        // range standard deviation
  const NM::Base::shared_ptr  // all same type
      priorNoise = NM::Diagonal::Sigmas(priorSigmas),  // prior
      looseNoise = NM::Isotropic::Sigma(2, 1000),      // loose LM prior
      odoNoise = NM::Diagonal::Sigmas(odoSigmas),      // odometry
      gaussian = NM::Isotropic::Sigma(1, sigmaR),      // non-robust
      tukey = NM::Robust::Create(NM::mEstimator::Tukey::Create(15),
                                 gaussian),  // robust
      rangeNoise = robust ? tukey : gaussian;

  // Initialize iSAM
  ISAM2 isam;
  gtsam::ISAM2 isam;

  // Add prior on first pose
  Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120,
      M_PI - 2.02108900000000);
  NonlinearFactorGraph newFactors;
  Pose2 pose0 = Pose2(-34.2086489999201, 45.3007639991120, M_PI - 2.021089);
  gtsam::NonlinearFactorGraph newFactors;
  newFactors.addPrior(0, pose0, priorNoise);
  Values initial;
  gtsam::Values initial;
  initial.insert(0, pose0);

  // initialize points
  if (groundTruth) { // from TL file
    initial.insert(symbol('L', 1), Point2(-68.9265, 18.3778));
    initial.insert(symbol('L', 6), Point2(-37.5805, 69.2278));
    initial.insert(symbol('L', 0), Point2(-33.6205, 26.9678));
    initial.insert(symbol('L', 5), Point2(1.7095, -5.8122));
  } else { // drawn from sigma=1 Gaussian in matlab version
    initial.insert(symbol('L', 1), Point2(3.5784, 2.76944));
    initial.insert(symbol('L', 6), Point2(-1.34989, 3.03492));
    initial.insert(symbol('L', 0), Point2(0.725404, -0.0630549));
    initial.insert(symbol('L', 5), Point2(0.714743, -0.204966));
  }
  // We will initialize landmarks randomly, and keep track of which landmarks we
  // already added with a set.
  std::mt19937_64 rng;
  std::normal_distribution<double> normal(0.0, 100.0);
  std::set<Symbol> initializedLandmarks;

  // set some loop variables
  size_t i = 1; // step counter
  size_t k = 0; // range measurement counter
  size_t i = 1;  // step counter
  size_t k = 0;  // range measurement counter
  bool initialized = false;
  Pose2 lastPose = pose0;
  size_t countK = 0;

  // Loop over odometry
  gttic_(iSAM);
  for(const TimedOdometry& timedOdometry: odometry) {
    //--------------------------------- odometry loop -----------------------------------------
  for (const TimedOdometry& timedOdometry : odometry) {
    //--------------------------------- odometry loop --------------------------
    double t;
    Pose2 odometry;
    boost::tie(t, odometry) = timedOdometry;

    // add odometry factor
    newFactors.push_back(BetweenFactor<Pose2>(i-1, i, odometry, odoNoise));
    newFactors.emplace_shared<gtsam::BetweenFactor<Pose2>>(i - 1, i, odometry,
                                                           odoNoise);

    // predict pose and add as initial estimate
    Pose2 predictedPose = lastPose.compose(odometry);

@@ -166,17 +180,30 @@ int main (int argc, char** argv) {
    // Check if there are range factors to be added
    while (k < K && t >= boost::get<0>(triples[k])) {
      size_t j = boost::get<1>(triples[k]);
      Symbol landmark_key('L', j);
      double range = boost::get<2>(triples[k]);
      newFactors.push_back(RangeFactor<Pose2, Point2>(i, symbol('L', j), range,rangeNoise));
      newFactors.emplace_shared<gtsam::RangeFactor<Pose2, Point2>>(
          i, landmark_key, range, rangeNoise);
      if (initializedLandmarks.count(landmark_key) == 0) {
        std::cout << "adding landmark " << j << std::endl;
        double x = normal(rng), y = normal(rng);
        initial.insert(landmark_key, Point2(x, y));
        initializedLandmarks.insert(landmark_key);
        // We also add a very loose prior on the landmark in case there is only
        // one sighting, which cannot fully determine the landmark.
        newFactors.emplace_shared<gtsam::PriorFactor<Point2>>(
            landmark_key, Point2(0, 0), looseNoise);
      }
      k = k + 1;
      countK = countK + 1;
    }

    // Check whether to update iSAM 2
    if ((k > minK) && (countK > incK)) {
      if (!initialized) { // Do a full optimize for first minK ranges
      if (!initialized) {  // Do a full optimize for first minK ranges
        std::cout << "Initializing at time " << k << std::endl;
        gttic_(batchInitialization);
        LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
        gtsam::LevenbergMarquardtOptimizer batchOptimizer(newFactors, initial);
        initial = batchOptimizer.optimize();
        gttoc_(batchInitialization);
        initialized = true;

@@ -185,21 +212,27 @@ int main (int argc, char** argv) {
      isam.update(newFactors, initial);
      gttoc_(update);
      gttic_(calculateEstimate);
      Values result = isam.calculateEstimate();
      gtsam::Values result = isam.calculateEstimate();
      gttoc_(calculateEstimate);
      lastPose = result.at<Pose2>(i);
      newFactors = NonlinearFactorGraph();
      initial = Values();
      newFactors = gtsam::NonlinearFactorGraph();
      initial = gtsam::Values();
      countK = 0;
    }
    i += 1;
    //--------------------------------- odometry loop -----------------------------------------
  } // end for
    //--------------------------------- odometry loop --------------------------
  }  // end for
  gttoc_(iSAM);

  // Print timings
  tictoc_print_();

  // Print optimized landmarks:
  gtsam::Values finalResult = isam.calculateEstimate();
  for (auto&& landmark_key : initializedLandmarks) {
    Point2 p = finalResult.at<Point2>(landmark_key);
    std::cout << landmark_key << ":" << p.transpose() << "\n";
  }

  exit(0);
}
@@ -26,9 +26,12 @@
#include <gtsam/nonlinear/ExpressionFactorGraph.h>

// Header order is close to far
#include <gtsam/inference/Symbol.h>
#include <gtsam/sfm/SfmData.h>  // for loading BAL datasets !
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/inference/Symbol.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@@ -46,10 +49,9 @@ int main(int argc, char* argv[]) {
  if (argc > 1) filename = string(argv[1]);

  // Load the SfM data from file
  SfmData mydata;
  readBAL(filename, mydata);
  SfmData mydata = SfmData::FromBalFile(filename);
  cout << boost::format("read %1% tracks on %2% cameras\n") %
              mydata.number_tracks() % mydata.number_cameras();
              mydata.numberTracks() % mydata.numberCameras();

  // Create a factor graph
  ExpressionFactorGraph graph;
@@ -10,17 +10,20 @@
 * -------------------------------------------------------------------------- */

/**
 * @file SFMExample.cpp
 * @file SFMExample_bal.cpp
 * @brief Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
 * @author Frank Dellaert
 */

// For an explanation of headers, see SFMExample.cpp
#include <gtsam/inference/Symbol.h>
#include <gtsam/sfm/SfmData.h>  // for loading BAL datasets !
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !
#include <gtsam/inference/Symbol.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@@ -41,9 +44,8 @@ int main (int argc, char* argv[]) {
  if (argc>1) filename = string(argv[1]);

  // Load the SfM data from file
  SfmData mydata;
  readBAL(filename, mydata);
  cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.number_tracks() % mydata.number_cameras();
  SfmData mydata = SfmData::FromBalFile(filename);
  cout << boost::format("read %1% tracks on %2% cameras\n") % mydata.numberTracks() % mydata.numberCameras();

  // Create a factor graph
  NonlinearFactorGraph graph;
@@ -17,15 +17,16 @@
 */

// For an explanation of headers, see SFMExample.cpp
#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/sfm/SfmData.h>  // for loading BAL datasets !
#include <gtsam/slam/dataset.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/slam/dataset.h> // for loading BAL datasets !

#include <gtsam/inference/Symbol.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/base/timing.h>

#include <boost/format.hpp>
#include <vector>

using namespace std;

@@ -45,10 +46,9 @@ int main(int argc, char* argv[]) {
  if (argc > 1) filename = string(argv[1]);

  // Load the SfM data from file
  SfmData mydata;
  readBAL(filename, mydata);
  SfmData mydata = SfmData::FromBalFile(filename);
  cout << boost::format("read %1% tracks on %2% cameras\n") %
              mydata.number_tracks() % mydata.number_cameras();
              mydata.numberTracks() % mydata.numberCameras();

  // Create a factor graph
  NonlinearFactorGraph graph;

@@ -131,7 +131,7 @@ int main(int argc, char* argv[]) {

  cout << "Time comparison by solving " << filename << " results:" << endl;
  cout << boost::format("%1% point tracks and %2% cameras\n") %
              mydata.number_tracks() % mydata.number_cameras()
              mydata.numberTracks() % mydata.numberCameras()
       << endl;

  tictoc_print_();
@@ -22,6 +22,8 @@
 * Passing function argument allows to specificy an initial position, a pose increment and step count.
 */

#pragma once

// As this is a full 3D problem, we will use Pose3 variables to represent the camera
// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).

@@ -66,4 +68,4 @@ std::vector<gtsam::Pose3> createPoses(
  }

  return poses;
}
}
@@ -18,6 +18,10 @@

#pragma once

#include <boost/version.hpp>
#if BOOST_VERSION >= 107400
#include <boost/serialization/library_version_type.hpp>
#endif
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/set.hpp>
#include <gtsam/base/FastDefaultAllocator.h>
@@ -25,6 +25,7 @@

#include <boost/tuple/tuple.hpp>
#include <boost/tokenizer.hpp>
#include <boost/format.hpp>

#include <cstdarg>
#include <cstring>
@@ -26,12 +26,9 @@

#include <gtsam/base/OptionalJacobian.h>
#include <gtsam/base/Vector.h>
#include <gtsam/config.h>

#include <boost/format.hpp>
#include <functional>
#include <boost/tuple/tuple.hpp>
#include <boost/math/special_functions/fpclassify.hpp>

#include <vector>

/**
 * Matrix is a typedef in the gtsam namespace

@@ -523,82 +520,4 @@ GTSAM_EXPORT Matrix LLt(const Matrix& A);
GTSAM_EXPORT Matrix RtR(const Matrix& A);

GTSAM_EXPORT Vector columnNormSquare(const Matrix &A);
} // namespace gtsam

#include <boost/serialization/nvp.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

/**
 * Ref. https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
 *
 * Eigen supports calling resize() on both static and dynamic matrices.
 * This allows for a uniform API, with resize having no effect if the static matrix
 * is already the correct size.
 * https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
 *
 * We use all the Matrix template parameters to ensure wide compatibility.
 *
 * eigen_typekit in ROS uses the same code
 * http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
 */

// split version - sends sizes ahead
template<class Archive,
  typename Scalar_,
  int Rows_,
  int Cols_,
  int Ops_,
  int MaxRows_,
  int MaxCols_>
void save(Archive & ar,
    const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
    const unsigned int /*version*/) {
  const size_t rows = m.rows(), cols = m.cols();
  ar << BOOST_SERIALIZATION_NVP(rows);
  ar << BOOST_SERIALIZATION_NVP(cols);
  ar << make_nvp("data", make_array(m.data(), m.size()));
}

template<class Archive,
  typename Scalar_,
  int Rows_,
  int Cols_,
  int Ops_,
  int MaxRows_,
  int MaxCols_>
void load(Archive & ar,
    Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
    const unsigned int /*version*/) {
  size_t rows, cols;
  ar >> BOOST_SERIALIZATION_NVP(rows);
  ar >> BOOST_SERIALIZATION_NVP(cols);
  m.resize(rows, cols);
  ar >> make_nvp("data", make_array(m.data(), m.size()));
}

// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
template<class Archive,
  typename Scalar_,
  int Rows_,
  int Cols_,
  int Ops_,
  int MaxRows_,
  int MaxCols_>
void serialize(Archive & ar,
    Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_> & m,
    const unsigned int version) {
  split_free(ar, m, version);
}

// specialized to Matrix for MATLAB wrapper
template <class Archive>
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
  split_free(ar, m, version);
}

} // namespace serialization
} // namespace boost
} // namespace gtsam
@@ -0,0 +1,89 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file MatrixSerialization.h
 * @brief Serialization for matrices
 * @author Frank Dellaert
 * @date February 2022
 */

// \callgraph

#pragma once

#include <gtsam/base/Matrix.h>

#include <boost/serialization/array.hpp>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

/**
 * Ref.
 * https://stackoverflow.com/questions/18382457/eigen-and-boostserialize/22903063#22903063
 *
 * Eigen supports calling resize() on both static and dynamic matrices.
 * This allows for a uniform API, with resize having no effect if the static
 * matrix is already the correct size.
 * https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html#TutorialMatrixSizesResizing
 *
 * We use all the Matrix template parameters to ensure wide compatibility.
 *
 * eigen_typekit in ROS uses the same code
 * http://docs.ros.org/lunar/api/eigen_typekit/html/eigen__mqueue_8cpp_source.html
 */

// split version - sends sizes ahead
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
          int MaxRows_, int MaxCols_>
void save(
    Archive& ar,
    const Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
    const unsigned int /*version*/) {
  const size_t rows = m.rows(), cols = m.cols();
  ar << BOOST_SERIALIZATION_NVP(rows);
  ar << BOOST_SERIALIZATION_NVP(cols);
  ar << make_nvp("data", make_array(m.data(), m.size()));
}

template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
          int MaxRows_, int MaxCols_>
void load(Archive& ar,
          Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
          const unsigned int /*version*/) {
  size_t rows, cols;
  ar >> BOOST_SERIALIZATION_NVP(rows);
  ar >> BOOST_SERIALIZATION_NVP(cols);
  m.resize(rows, cols);
  ar >> make_nvp("data", make_array(m.data(), m.size()));
}

// templated version of BOOST_SERIALIZATION_SPLIT_FREE(Eigen::Matrix);
template <class Archive, typename Scalar_, int Rows_, int Cols_, int Ops_,
          int MaxRows_, int MaxCols_>
void serialize(
    Archive& ar,
    Eigen::Matrix<Scalar_, Rows_, Cols_, Ops_, MaxRows_, MaxCols_>& m,
    const unsigned int version) {
  split_free(ar, m, version);
}

// specialized to Matrix for MATLAB wrapper
template <class Archive>
void serialize(Archive& ar, gtsam::Matrix& m, const unsigned int version) {
  split_free(ar, m, version);
}

} // namespace serialization
} // namespace boost
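For reference, a minimal round-trip sketch showing how these free functions are picked up by Boost.Serialization; the archive type and file name are illustrative choices, not taken from GTSAM's tests:

```cpp
// Minimal sketch (illustrative): serialize a gtsam::Matrix and read it back.
#include <gtsam/base/MatrixSerialization.h>

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

#include <fstream>

int main() {
  const gtsam::Matrix A = gtsam::Matrix::Random(3, 4);

  {  // save(): writes rows and cols ahead of the raw data array
    std::ofstream os("matrix.txt");
    boost::archive::text_oarchive oa(os);
    oa << A;
  }

  gtsam::Matrix B;
  {  // load(): resizes B from the stored sizes, then reads the data back
    std::ifstream is("matrix.txt");
    boost::archive::text_iarchive ia(is);
    ia >> B;
  }

  return A.isApprox(B) ? 0 : 1;  // round-trip check
}
```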
@@ -20,6 +20,8 @@
#pragma once
#include <gtsam/config.h>      // Configuration from CMake
#include <Eigen/Dense>
#include <stdexcept>
#include <string>

#ifndef OPTIONALJACOBIAN_NOBOOST
#include <boost/optional.hpp>

@@ -96,6 +98,24 @@ public:
    usurp(dynamic->data());
  }

  /**
   * @brief Constructor from an Eigen::Ref *value*. Will not usurp if dimension is wrong
   * @note This is important so we don't overwrite someone else's memory!
   */
  template<class MATRIX>
  OptionalJacobian(Eigen::Ref<MATRIX> dynamic_ref) :
      map_(nullptr) {
    if (dynamic_ref.rows() == Rows && dynamic_ref.cols() == Cols && !dynamic_ref.IsRowMajor) {
      usurp(dynamic_ref.data());
    } else {
      throw std::invalid_argument(
          std::string("OptionalJacobian called with wrong dimensions or "
                      "storage order.\n"
                      "Expected: ") +
          "(" + std::to_string(Rows) + ", " + std::to_string(Cols) + ")");
    }
  }

#ifndef OPTIONALJACOBIAN_NOBOOST

  /// Constructor with boost::none just makes empty
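A minimal sketch of how the new `Eigen::Ref` constructor above might be exercised; the function and variable names are invented for illustration:

```cpp
// Illustrative sketch: a 2x3 OptionalJacobian wraps a correctly-sized,
// column-major block, and rejects anything with the wrong shape.
#include <gtsam/base/OptionalJacobian.h>

#include <Eigen/Dense>
#include <iostream>

// Hypothetical measurement function: fills the Jacobian only when requested.
void h(gtsam::OptionalJacobian<2, 3> H = boost::none) {
  if (H) H->setOnes();
}

int main() {
  Eigen::Matrix<double, 2, 3> J;
  h(Eigen::Ref<Eigen::Matrix<double, 2, 3>>(J));  // OK: dimensions match
  std::cout << J << std::endl;                    // J now holds the Jacobian

  Eigen::MatrixXd wrong(3, 3);
  try {
    h(Eigen::Ref<Eigen::MatrixXd>(wrong));  // throws: wrong dimensions
  } catch (const std::invalid_argument& e) {
    std::cout << e.what() << std::endl;
  }
  return 0;
}
```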
@@ -21,6 +21,7 @@
#include <gtsam/config.h>      // Configuration from CMake

#include <gtsam/base/Vector.h>
#include <boost/serialization/nvp.hpp>
#include <boost/serialization/assume_abstract.hpp>
#include <memory>
@@ -264,46 +264,4 @@ GTSAM_EXPORT Vector concatVectors(const std::list<Vector>& vs);
 * concatenate Vectors
 */
GTSAM_EXPORT Vector concatVectors(size_t nrVectors, ...);
} // namespace gtsam

#include <boost/serialization/nvp.hpp>
#include <boost/serialization/array.hpp>
#include <boost/serialization/split_free.hpp>

namespace boost {
namespace serialization {

// split version - copies into an STL vector for serialization
template<class Archive>
void save(Archive & ar, const gtsam::Vector & v, unsigned int /*version*/) {
  const size_t size = v.size();
  ar << BOOST_SERIALIZATION_NVP(size);
  ar << make_nvp("data", make_array(v.data(), v.size()));
}

template<class Archive>
void load(Archive & ar, gtsam::Vector & v, unsigned int /*version*/) {
  size_t size;
  ar >> BOOST_SERIALIZATION_NVP(size);
  v.resize(size);
  ar >> make_nvp("data", make_array(v.data(), v.size()));
}

// split version - copies into an STL vector for serialization
template<class Archive, int D>
void save(Archive & ar, const Eigen::Matrix<double,D,1> & v, unsigned int /*version*/) {
  ar << make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
}

template<class Archive, int D>
void load(Archive & ar, Eigen::Matrix<double,D,1> & v, unsigned int /*version*/) {
  ar >> make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
}

} // namespace serialization
} // namespace boost

BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector2)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector3)
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector6)
} // namespace gtsam
@ -0,0 +1,65 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file VectorSerialization.h
|
||||
* @brief serialization for Vectors
|
||||
* @author Frank Dellaert
|
||||
* @date February 2022
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Vector.h>
|
||||
|
||||
#include <boost/serialization/array.hpp>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/serialization/split_free.hpp>
|
||||
|
||||
namespace boost {
|
||||
namespace serialization {
|
||||
|
||||
// split version - copies into an STL vector for serialization
|
||||
template <class Archive>
|
||||
void save(Archive& ar, const gtsam::Vector& v, unsigned int /*version*/) {
|
||||
const size_t size = v.size();
|
||||
ar << BOOST_SERIALIZATION_NVP(size);
|
||||
ar << make_nvp("data", make_array(v.data(), v.size()));
|
||||
}
|
||||
|
||||
template <class Archive>
|
||||
void load(Archive& ar, gtsam::Vector& v, unsigned int /*version*/) {
|
||||
size_t size;
|
||||
ar >> BOOST_SERIALIZATION_NVP(size);
|
||||
v.resize(size);
|
||||
ar >> make_nvp("data", make_array(v.data(), v.size()));
|
||||
}
|
||||
|
||||
// split version - copies into an STL vector for serialization
|
||||
template <class Archive, int D>
|
||||
void save(Archive& ar, const Eigen::Matrix<double, D, 1>& v,
|
||||
unsigned int /*version*/) {
|
||||
ar << make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
|
||||
}
|
||||
|
||||
template <class Archive, int D>
|
||||
void load(Archive& ar, Eigen::Matrix<double, D, 1>& v,
|
||||
unsigned int /*version*/) {
|
||||
ar >> make_nvp("data", make_array(v.data(), v.RowsAtCompileTime));
|
||||
}
|
||||
|
||||
} // namespace serialization
|
||||
} // namespace boost
|
||||
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector2)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector3)
|
||||
BOOST_SERIALIZATION_SPLIT_FREE(gtsam::Vector6)
|
|
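Aside: a rough sketch (not part of this diff) of how these free save/load hooks are exercised with the usual boost text archives; it simply round-trips a gtsam::Vector through a string stream.

#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <gtsam/base/VectorSerialization.h>
#include <sstream>

int main() {
  const gtsam::Vector v = (gtsam::Vector(3) << 1.0, 2.0, 3.0).finished();

  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    oa << v;  // dispatches to the free save() above via split_free
  }

  gtsam::Vector w;
  {
    boost::archive::text_iarchive ia(ss);
    ia >> w;  // dispatches to the free load() above
  }
  // w now holds the same three entries as v.
  return 0;
}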
@ -18,6 +18,7 @@
#pragma once

#include <gtsam/base/Matrix.h>
#include <gtsam/base/MatrixSerialization.h>
#include <gtsam/base/FastVector.h>

namespace gtsam {
@ -82,6 +82,7 @@ class IndexPairSetMap {
};

#include <gtsam/base/Matrix.h>
#include <gtsam/base/MatrixSerialization.h>
bool linear_independent(Matrix A, Matrix B, double tol);

#include <gtsam/base/Value.h>
@ -18,7 +18,6 @@
#pragma once

#include <gtsam/base/Matrix.h>
#include <boost/shared_ptr.hpp>

namespace gtsam {

@ -25,6 +25,7 @@
#include <string>

// includes for standard serialization types
#include <boost/serialization/version.hpp>
#include <boost/serialization/optional.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
@ -42,7 +42,7 @@ T create() {
}

// Creates or empties a folder in the build folder and returns the relative path
boost::filesystem::path resetFilesystem(
inline boost::filesystem::path resetFilesystem(
boost::filesystem::path folder = "actual") {
boost::filesystem::remove_all(folder);
boost::filesystem::create_directory(folder);
@ -19,6 +19,7 @@
#include <gtsam/inference/Key.h>

#include <gtsam/base/Matrix.h>
#include <gtsam/base/MatrixSerialization.h>
#include <gtsam/base/Vector.h>
#include <gtsam/base/FastList.h>
#include <gtsam/base/FastMap.h>
@ -92,7 +92,7 @@ Matrix kroneckerProductIdentity(const Weights& w) {

/// CRTP Base class for function bases
template <typename DERIVED>
class GTSAM_EXPORT Basis {
class Basis {
public:
/**
* Calculate weights for all x in vector X.
@ -497,11 +497,6 @@ class GTSAM_EXPORT Basis {
}
};

// Vector version for MATLAB :-(
static double Derivative(double x, const Vector& p, //
OptionalJacobian</*1xN*/ -1, -1> H = boost::none) {
return DerivativeFunctor(x)(p.transpose(), H);
}
};

} // namespace gtsam
@ -29,9 +29,12 @@ namespace gtsam {
* pseudo-spectral parameterization.
*
* @tparam BASIS The basis class to use e.g. Chebyshev2
*
* Example, degree 8 Chebyshev polynomial measured at x=0.5:
* EvaluationFactor<Chebyshev2> factor(key, measured, model, 8, 0.5);
*/
template <class BASIS>
class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
class EvaluationFactor : public FunctorizedFactor<double, Vector> {
private:
using Base = FunctorizedFactor<double, Vector>;
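Aside: expanding the class-level example above into a rough, self-contained sketch (not part of this diff); the key name and noise sigma are arbitrary.

#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

int main() {
  using namespace gtsam;
  const size_t N = 8;           // number of Chebyshev coefficients
  const double x = 0.5;         // evaluation point
  const double measured = 1.0;  // measured function value at x
  auto model = noiseModel::Isotropic::Sigma(1, 0.1);

  // Prior on the value of the Chebyshev2 interpolant at x.
  EvaluationFactor<Chebyshev2> factor(Symbol('c', 0), measured, model, N, x);

  NonlinearFactorGraph graph;
  graph.add(factor);
  return 0;
}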
@ -47,7 +50,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
* @param N The degree of the polynomial.
* @param x The point at which to evaluate the polynomial.
*/
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
EvaluationFactor(Key key, double z, const SharedNoiseModel &model,
const size_t N, double x)
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x)) {}

@ -62,7 +65,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
* @param a Lower bound for the polynomial.
* @param b Upper bound for the polynomial.
*/
EvaluationFactor(Key key, const double &z, const SharedNoiseModel &model,
EvaluationFactor(Key key, double z, const SharedNoiseModel &model,
const size_t N, double x, double a, double b)
: Base(key, z, model, typename BASIS::EvaluationFunctor(N, x, a, b)) {}

@ -85,7 +88,7 @@ class GTSAM_EXPORT EvaluationFactor : public FunctorizedFactor<double, Vector> {
* @param M: Size of the evaluated state vector.
*/
template <class BASIS, int M>
class GTSAM_EXPORT VectorEvaluationFactor
class VectorEvaluationFactor
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
private:
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
@ -148,7 +151,7 @@ class GTSAM_EXPORT VectorEvaluationFactor
* where N is the degree and i is the component index.
*/
template <class BASIS, size_t P>
class GTSAM_EXPORT VectorComponentFactor
class VectorComponentFactor
: public FunctorizedFactor<double, ParameterMatrix<P>> {
private:
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
@ -217,7 +220,7 @@ class GTSAM_EXPORT VectorComponentFactor
* where `x` is the value (e.g. timestep) at which the rotation was evaluated.
*/
template <class BASIS, typename T>
class GTSAM_EXPORT ManifoldEvaluationFactor
class ManifoldEvaluationFactor
: public FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>> {
private:
using Base = FunctorizedFactor<T, ParameterMatrix<traits<T>::dimension>>;
@ -269,7 +272,7 @@ class GTSAM_EXPORT ManifoldEvaluationFactor
* @param BASIS: The basis class to use e.g. Chebyshev2
*/
template <class BASIS>
class GTSAM_EXPORT DerivativeFactor
class DerivativeFactor
: public FunctorizedFactor<double, typename BASIS::Parameters> {
private:
using Base = FunctorizedFactor<double, typename BASIS::Parameters>;
@ -318,7 +321,7 @@ class GTSAM_EXPORT DerivativeFactor
* @param M: Size of the evaluated state vector derivative.
*/
template <class BASIS, int M>
class GTSAM_EXPORT VectorDerivativeFactor
class VectorDerivativeFactor
: public FunctorizedFactor<Vector, ParameterMatrix<M>> {
private:
using Base = FunctorizedFactor<Vector, ParameterMatrix<M>>;
@ -371,7 +374,7 @@ class GTSAM_EXPORT VectorDerivativeFactor
* @param P: Size of the control component derivative.
*/
template <class BASIS, int P>
class GTSAM_EXPORT ComponentDerivativeFactor
class ComponentDerivativeFactor
: public FunctorizedFactor<double, ParameterMatrix<P>> {
private:
using Base = FunctorizedFactor<double, ParameterMatrix<P>>;
@ -21,8 +21,6 @@
#include <gtsam/base/Manifold.h>
#include <gtsam/basis/Basis.h>

#include <unsupported/Eigen/KroneckerProduct>

namespace gtsam {

/**
@ -31,7 +29,7 @@ namespace gtsam {
* These are typically denoted with the symbol T_n, where n is the degree.
* The parameter N is the number of coefficients, i.e., N = n+1.
*/
struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
struct GTSAM_EXPORT Chebyshev1Basis : Basis<Chebyshev1Basis> {
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;

Parameters parameters_;
@ -79,7 +77,7 @@ struct Chebyshev1Basis : Basis<Chebyshev1Basis> {
* functions. In this sense, they are like the sines and cosines of the Fourier
* basis.
*/
struct Chebyshev2Basis : Basis<Chebyshev2Basis> {
struct GTSAM_EXPORT Chebyshev2Basis : Basis<Chebyshev2Basis> {
using Parameters = Eigen::Matrix<double, -1, 1 /*Nx1*/>;

/**
@ -22,8 +22,7 @@
*
* This is different from Chebyshev.h since it leverage ideas from
* pseudo-spectral optimization, i.e. we don't decompose into basis functions,
* rather estimate function parameters that enforce function nodes at Chebyshev
* points.
* rather estimate function values at the Chebyshev points.
*
* Please refer to Agrawal21icra for more details.
*
@ -24,7 +24,7 @@
namespace gtsam {

/// Fourier basis
class GTSAM_EXPORT FourierBasis : public Basis<FourierBasis> {
class FourierBasis : public Basis<FourierBasis> {
public:
using Parameters = Eigen::Matrix<double, /*Nx1*/ -1, 1>;
using DiffMatrix = Eigen::Matrix<double, /*NxN*/ -1, -1>;
@ -44,9 +44,6 @@ class Chebyshev2 {
static Matrix DerivativeWeights(size_t N, double x, double a, double b);
static Matrix IntegrationWeights(size_t N, double a, double b);
static Matrix DifferentiationMatrix(size_t N, double a, double b);

// TODO Needs OptionalJacobian
// static double Derivative(double x, Vector f);
};

#include <gtsam/basis/ParameterMatrix.h>
@ -0,0 +1,230 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file testBasisFactors.cpp
* @date May 31, 2020
* @author Varun Agrawal
* @brief unit tests for factors in BasisFactors.h
*/

#include <gtsam/basis/Basis.h>
#include <gtsam/basis/BasisFactors.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/FunctorizedFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/TestableAssertions.h>
#include <gtsam/base/Vector.h>

#include <CppUnitLite/TestHarness.h>

using gtsam::noiseModel::Isotropic;
using gtsam::Pose2;
using gtsam::Vector;
using gtsam::Values;
using gtsam::Chebyshev2;
using gtsam::ParameterMatrix;
using gtsam::LevenbergMarquardtParams;
using gtsam::LevenbergMarquardtOptimizer;
using gtsam::NonlinearFactorGraph;
using gtsam::NonlinearOptimizerParams;

constexpr size_t N = 2;

// Key used in all tests
const gtsam::Symbol key('X', 0);

//******************************************************************************
TEST(BasisFactors, EvaluationFactor) {
using gtsam::EvaluationFactor;

double measured = 0;

auto model = Isotropic::Sigma(1, 1.0);
EvaluationFactor<Chebyshev2> factor(key, measured, model, N, 0);

NonlinearFactorGraph graph;
graph.add(factor);

Vector functionValues(N);
functionValues.setZero();

Values initial;
initial.insert<Vector>(key, functionValues);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, VectorEvaluationFactor) {
using gtsam::VectorEvaluationFactor;
const size_t M = 4;

const Vector measured = Vector::Zero(M);

auto model = Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);

NonlinearFactorGraph graph;
graph.add(factor);

ParameterMatrix<M> stateMatrix(N);

Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, Print) {
using gtsam::VectorEvaluationFactor;
const size_t M = 1;

const Vector measured = Vector::Ones(M) * 42;

auto model = Isotropic::Sigma(M, 1.0);
VectorEvaluationFactor<Chebyshev2, M> factor(key, measured, model, N, 0);

std::string expected =
" keys = { X0 }\n"
" noise model: unit (1) \n"
"FunctorizedFactor(X0)\n"
" measurement: [\n"
" 42\n"
"]\n"
" noise model sigmas: 1\n";

EXPECT(assert_print_equal(expected, factor));
}

//******************************************************************************
TEST(BasisFactors, VectorComponentFactor) {
using gtsam::VectorComponentFactor;
const int P = 4;
const size_t i = 2;
const double measured = 0.0, t = 3.0, a = 2.0, b = 4.0;
auto model = Isotropic::Sigma(1, 1.0);
VectorComponentFactor<Chebyshev2, P> factor(key, measured, model, N, i,
t, a, b);

NonlinearFactorGraph graph;
graph.add(factor);

ParameterMatrix<P> stateMatrix(N);

Values initial;
initial.insert<ParameterMatrix<P>>(key, stateMatrix);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, ManifoldEvaluationFactor) {
using gtsam::ManifoldEvaluationFactor;
const Pose2 measured;
const double t = 3.0, a = 2.0, b = 4.0;
auto model = Isotropic::Sigma(3, 1.0);
ManifoldEvaluationFactor<Chebyshev2, Pose2> factor(key, measured, model, N,
t, a, b);

NonlinearFactorGraph graph;
graph.add(factor);

ParameterMatrix<3> stateMatrix(N);

Values initial;
initial.insert<ParameterMatrix<3>>(key, stateMatrix);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, VecDerivativePrior) {
using gtsam::VectorDerivativeFactor;
const size_t M = 4;

const Vector measured = Vector::Zero(M);
auto model = Isotropic::Sigma(M, 1.0);
VectorDerivativeFactor<Chebyshev2, M> vecDPrior(key, measured, model, N, 0);

NonlinearFactorGraph graph;
graph.add(vecDPrior);

ParameterMatrix<M> stateMatrix(N);

Values initial;
initial.insert<ParameterMatrix<M>>(key, stateMatrix);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

//******************************************************************************
TEST(BasisFactors, ComponentDerivativeFactor) {
using gtsam::ComponentDerivativeFactor;
const size_t M = 4;

double measured = 0;
auto model = Isotropic::Sigma(1, 1.0);
ComponentDerivativeFactor<Chebyshev2, M> controlDPrior(key, measured, model,
N, 0, 0);

NonlinearFactorGraph graph;
graph.add(controlDPrior);

Values initial;
ParameterMatrix<M> stateMatrix(N);
initial.insert<ParameterMatrix<M>>(key, stateMatrix);

LevenbergMarquardtParams parameters;
parameters.setMaxIterations(20);
Values result =
LevenbergMarquardtOptimizer(graph, initial, parameters).optimize();

EXPECT_DOUBLES_EQUAL(0, graph.error(result), 1e-9);
}

/* ************************************************************************* */
int main() {
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@ -25,9 +25,10 @@
using namespace std;
using namespace gtsam;

namespace {
auto model = noiseModel::Unit::Create(1);

const size_t N = 3;
} // namespace

//******************************************************************************
TEST(Chebyshev, Chebyshev1) {
@ -10,26 +10,30 @@
* -------------------------------------------------------------------------- */

/**
* @file testChebyshev.cpp
* @file testChebyshev2.cpp
* @date July 4, 2020
* @author Varun Agrawal
* @brief Unit tests for Chebyshev Basis Decompositions via pseudo-spectral
* methods
*/

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/basis/Chebyshev2.h>
#include <gtsam/basis/FitBasis.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/base/Testable.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;
using namespace boost::placeholders;

namespace {
noiseModel::Diagonal::shared_ptr model = noiseModel::Unit::Create(1);

const size_t N = 32;
} // namespace

//******************************************************************************
TEST(Chebyshev2, Point) {
@ -121,12 +125,30 @@ TEST(Chebyshev2, InterpolateVector) {
EXPECT(assert_equal(numericalH, actualH, 1e-9));
}

//******************************************************************************
// Interpolating poses using the exponential map
TEST(Chebyshev2, InterpolatePose2) {
double t = 30, a = 0, b = 100;

ParameterMatrix<3> X(N);
X.row(0) = Chebyshev2::Points(N, a, b); // slope 1 ramp
X.row(1) = Vector::Zero(N);
X.row(2) = 0.1 * Vector::Ones(N);

Vector xi(3);
xi << t, 0, 0.1;
Chebyshev2::ManifoldEvaluationFunctor<Pose2> fx(N, t, a, b);
// We use xi as canonical coordinates via exponential map
Pose2 expected = Pose2::ChartAtOrigin::Retract(xi);
EXPECT(assert_equal(expected, fx(X)));
}

//******************************************************************************
TEST(Chebyshev2, Decomposition) {
// Create example sequence
Sequence sequence;
for (size_t i = 0; i < 16; i++) {
double x = (double)i / 16. - 0.99, y = x;
double x = (1.0/ 16)*i - 0.99, y = x;
sequence[x] = y;
}

@ -144,11 +166,11 @@ TEST(Chebyshev2, DifferentiationMatrix3) {
// Trefethen00book, p.55
const size_t N = 3;
Matrix expected(N, N);
// Differentiation matrix computed from Chebfun
// Differentiation matrix computed from chebfun
expected << 1.5000, -2.0000, 0.5000, //
0.5000, -0.0000, -0.5000, //
-0.5000, 2.0000, -1.5000;
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
// multiply by -1 since the chebyshev points have a phase shift wrt Trefethen
// This was verified with chebfun
expected = -expected;

@ -167,7 +189,7 @@ TEST(Chebyshev2, DerivativeMatrix6) {
0.3820, -0.8944, 1.6180, 0.1708, -2.0000, 0.7236, //
-0.2764, 0.6180, -0.8944, 2.0000, 1.1708, -2.6180, //
0.5000, -1.1056, 1.5279, -2.8944, 10.4721, -8.5000;
// multiply by -1 since the cheb points have a phase shift wrt Trefethen
// multiply by -1 since the chebyshev points have a phase shift wrt Trefethen
// This was verified with chebfun
expected = -expected;

@ -252,7 +274,7 @@ TEST(Chebyshev2, DerivativeWeights2) {
Weights dWeights2 = Chebyshev2::DerivativeWeights(N, x2, a, b);
EXPECT_DOUBLES_EQUAL(fprime(x2), dWeights2 * fvals, 1e-8);

// test if derivative calculation and cheb point is correct
// test if derivative calculation and Chebyshev point is correct
double x3 = Chebyshev2::Point(N, 3, a, b);
Weights dWeights3 = Chebyshev2::DerivativeWeights(N, x3, a, b);
EXPECT_DOUBLES_EQUAL(fprime(x3), dWeights3 * fvals, 1e-8);
@ -0,0 +1,28 @@
/* ----------------------------------------------------------------------------

* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)

* See LICENSE for the license information

* -------------------------------------------------------------------------- */

/**
* @file AlgebraicDecisionTree.cpp
* @date Feb 20, 2022
* @author Mike Sheffler
* @author Duy-Nguyen Ta
* @author Frank Dellaert
*/

#include "AlgebraicDecisionTree.h"

#include <gtsam/base/types.h>

namespace gtsam {

template class AlgebraicDecisionTree<Key>;

} // namespace gtsam
@ -127,7 +127,7 @@ namespace gtsam {
return map.at(label);
};
std::function<double(const double&)> op = Ring::id;
this->root_ = this->template convertFrom(other.root_, L_of_M, op);
this->root_ = DecisionTree<L, double>::convertFrom(other.root_, L_of_M, op);
}

/** sum */
@ -160,7 +160,7 @@ namespace gtsam {
const typename Base::LabelFormatter& labelFormatter =
&DefaultFormatter) const {
auto valueFormatter = [](const double& v) {
return (boost::format("%4.4g") % v).str();
return (boost::format("%4.8g") % v).str();
};
Base::print(s, labelFormatter, valueFormatter);
}
@ -33,6 +33,8 @@ namespace gtsam {
template <class L>
class Assignment : public std::map<L, size_t> {
public:
using std::map<L, size_t>::operator=;

void print(const std::string& s = "Assignment: ") const {
std::cout << s << ": ";
for (const typename Assignment::value_type& keyValue : *this)
@ -59,33 +59,41 @@ namespace gtsam {
/** constant stored in this leaf */
Y constant_;

/** Constructor from constant */
Leaf(const Y& constant) :
constant_(constant) {}
/** The number of assignments contained within this leaf.
* Particularly useful when leaves have been pruned.
*/
size_t nrAssignments_;

/** return the constant */
/// Constructor from constant
Leaf(const Y& constant, size_t nrAssignments = 1)
: constant_(constant), nrAssignments_(nrAssignments) {}

/// Return the constant
const Y& constant() const {
return constant_;
}

/// Return the number of assignments contained within this leaf.
size_t nrAssignments() const { return nrAssignments_; }

/// Leaf-Leaf equality
bool sameLeaf(const Leaf& q) const override {
return constant_ == q.constant_;
}

/// polymorphic equality: is q is a leaf, could be
/// polymorphic equality: is q a leaf and is it the same as this leaf?
bool sameLeaf(const Node& q) const override {
return (q.isLeaf() && q.sameLeaf(*this));
}

/** equality up to tolerance */
/// equality up to tolerance
bool equals(const Node& q, const CompareFunc& compare) const override {
const Leaf* other = dynamic_cast<const Leaf*>(&q);
if (!other) return false;
return compare(this->constant_, other->constant_);
}

/** print */
/// print
void print(const std::string& s, const LabelFormatter& labelFormatter,
const ValueFormatter& valueFormatter) const override {
std::cout << s << " Leaf " << valueFormatter(constant_) << std::endl;
@ -108,7 +116,14 @@ namespace gtsam {

/** apply unary operator */
NodePtr apply(const Unary& op) const override {
NodePtr f(new Leaf(op(constant_)));
NodePtr f(new Leaf(op(constant_), nrAssignments_));
return f;
}

/// Apply unary operator with assignment
NodePtr apply(const UnaryAssignment& op,
const Assignment<L>& assignment) const override {
NodePtr f(new Leaf(op(assignment, constant_), nrAssignments_));
return f;
}

@ -123,7 +138,8 @@ namespace gtsam {

// Applying binary operator to two leaves results in a leaf
NodePtr apply_g_op_fL(const Leaf& fL, const Binary& op) const override {
NodePtr h(new Leaf(op(fL.constant_, constant_))); // fL op gL
// fL op gL
NodePtr h(new Leaf(op(fL.constant_, constant_), nrAssignments_));
return h;
}

@ -134,7 +150,7 @@ namespace gtsam {

/** choose a branch, create new memory ! */
NodePtr choose(const L& label, size_t index) const override {
return NodePtr(new Leaf(constant()));
return NodePtr(new Leaf(constant(), nrAssignments()));
}

bool isLeaf() const override { return true; }
@ -152,7 +168,10 @@ namespace gtsam {
std::vector<NodePtr> branches_;

private:
/** incremental allSame */
/**
* Incremental allSame.
* Records if all the branches are the same leaf.
*/
size_t allSame_;

using ChoicePtr = boost::shared_ptr<const Choice>;
@ -165,15 +184,22 @@ namespace gtsam {
#endif
}

/** If all branches of a choice node f are the same, just return a branch */
/// If all branches of a choice node f are the same, just return a branch.
static NodePtr Unique(const ChoicePtr& f) {
#ifndef DT_NO_PRUNING
#ifndef GTSAM_DT_NO_PRUNING
if (f->allSame_) {
assert(f->branches().size() > 0);
NodePtr f0 = f->branches_[0];
assert(f0->isLeaf());

size_t nrAssignments = 0;
for(auto branch: f->branches()) {
assert(branch->isLeaf());
nrAssignments +=
boost::dynamic_pointer_cast<const Leaf>(branch)->nrAssignments();
}
NodePtr newLeaf(
new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant()));
new Leaf(boost::dynamic_pointer_cast<const Leaf>(f0)->constant(),
nrAssignments));
return newLeaf;
} else
#endif
@ -182,15 +208,13 @@ namespace gtsam {

bool isLeaf() const override { return false; }

/** Constructor, given choice label and mandatory expected branch count */
/// Constructor, given choice label and mandatory expected branch count.
Choice(const L& label, size_t count) :
label_(label), allSame_(true) {
branches_.reserve(count);
}

/**
* Construct from applying binary op to two Choice nodes
*/
/// Construct from applying binary op to two Choice nodes.
Choice(const Choice& f, const Choice& g, const Binary& op) :
allSame_(true) {
// Choose what to do based on label
@ -218,6 +242,7 @@ namespace gtsam {
}
}

/// Return the label of this choice node.
const L& label() const {
return label_;
}
@ -239,7 +264,7 @@ namespace gtsam {
branches_.push_back(node);
}

/** print (as a tree) */
/// print (as a tree).
void print(const std::string& s, const LabelFormatter& labelFormatter,
const ValueFormatter& valueFormatter) const override {
std::cout << s << " Choice(";
@ -285,7 +310,7 @@ namespace gtsam {
return (q.isLeaf() && q.sameLeaf(*this));
}

/** equality */
/// equality
bool equals(const Node& q, const CompareFunc& compare) const override {
const Choice* other = dynamic_cast<const Choice*>(&q);
if (!other) return false;
@ -298,7 +323,7 @@ namespace gtsam {
return true;
}

/** evaluate */
/// evaluate
const Y& operator()(const Assignment<L>& x) const override {
#ifndef NDEBUG
typename Assignment<L>::const_iterator it = x.find(label_);
@ -313,21 +338,57 @@ namespace gtsam {
return (*child)(x);
}

/**
* Construct from applying unary op to a Choice node
*/
/// Construct from applying unary op to a Choice node.
Choice(const L& label, const Choice& f, const Unary& op) :
label_(label), allSame_(true) {
branches_.reserve(f.branches_.size()); // reserve space
for (const NodePtr& branch : f.branches_) push_back(branch->apply(op));
for (const NodePtr& branch : f.branches_) {
push_back(branch->apply(op));
}
}

/** apply unary operator */
/**
* @brief Constructor which accepts a UnaryAssignment op and the
* corresponding assignment.
*
* @param label The label for this node.
* @param f The original choice node to apply the op on.
* @param op Function to apply on the choice node. Takes Assignment and
* value as arguments.
* @param assignment The Assignment that will go to op.
*/
Choice(const L& label, const Choice& f, const UnaryAssignment& op,
const Assignment<L>& assignment)
: label_(label), allSame_(true) {
branches_.reserve(f.branches_.size()); // reserve space

Assignment<L> assignment_ = assignment;

for (size_t i = 0; i < f.branches_.size(); i++) {
assignment_[label_] = i; // Set assignment for label to i

const NodePtr branch = f.branches_[i];
push_back(branch->apply(op, assignment_));

// Remove the assignment so we are backtracking
auto assignment_it = assignment_.find(label_);
assignment_.erase(assignment_it);
}
}

/// apply unary operator.
NodePtr apply(const Unary& op) const override {
auto r = boost::make_shared<Choice>(label_, *this, op);
return Unique(r);
}

/// Apply unary operator with assignment
NodePtr apply(const UnaryAssignment& op,
const Assignment<L>& assignment) const override {
auto r = boost::make_shared<Choice>(label_, *this, op, assignment);
return Unique(r);
}

// Apply binary operator "h = f op g" on Choice node
// Note op is not assumed commutative so we need to keep track of order
// Simply calls apply on argument to call correct virtual method:
@ -592,11 +653,13 @@ namespace gtsam {
std::function<Y(const X&)> Y_of_X) const {
using LY = DecisionTree<L, Y>;

// ugliness below because apparently we can't have templated virtual
// functions If leaf, apply unary conversion "op" and create a unique leaf
// Ugliness below because apparently we can't have templated virtual
// functions.
// If leaf, apply unary conversion "op" and create a unique leaf.
using MXLeaf = typename DecisionTree<M, X>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const MXLeaf>(f))
return NodePtr(new Leaf(Y_of_X(leaf->constant())));
if (auto leaf = boost::dynamic_pointer_cast<const MXLeaf>(f)) {
return NodePtr(new Leaf(Y_of_X(leaf->constant()), leaf->nrAssignments()));
}

// Check if Choice
using MXChoice = typename DecisionTree<M, X>::Choice;
@ -617,7 +680,16 @@ namespace gtsam {
}

/****************************************************************************/
// Functor performing depth-first visit without Assignment<L> argument.
/**
* Functor performing depth-first visit to each leaf with the leaf value as
* the argument.
*
* NOTE: We differentiate between leaves and assignments. Concretely, a 3
* binary variable tree will have 2^3=8 assignments, but based on pruning, it
* can have less than 8 leaves. For example, if a tree has all assignment
* values as 1, then pruning will cause the tree to have only 1 leaf yet 8
* assignments.
*/
template <typename L, typename Y>
struct Visit {
using F = std::function<void(const Y&)>;
@ -646,28 +718,74 @@ namespace gtsam {
}

/****************************************************************************/
// Functor performing depth-first visit with Assignment<L> argument.
/**
* Functor performing depth-first visit to each leaf with the Leaf object
* passed as an argument.
*
* NOTE: We differentiate between leaves and assignments. Concretely, a 3
* binary variable tree will have 2^3=8 assignments, but based on pruning, it
* can have <8 leaves. For example, if a tree has all assignment values as 1,
* then pruning will cause the tree to have only 1 leaf yet 8 assignments.
*/
template <typename L, typename Y>
struct VisitLeaf {
using F = std::function<void(const typename DecisionTree<L, Y>::Leaf&)>;
explicit VisitLeaf(F f) : f(f) {} ///< Construct from folding function.
F f; ///< folding function object.

/// Do a depth-first visit on the tree rooted at node.
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) const {
using Leaf = typename DecisionTree<L, Y>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
return f(*leaf);

using Choice = typename DecisionTree<L, Y>::Choice;
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
if (!choice)
throw std::invalid_argument("DecisionTree::VisitLeaf: Invalid NodePtr");
for (auto&& branch : choice->branches()) (*this)(branch); // recurse!
}
};

template <typename L, typename Y>
template <typename Func>
void DecisionTree<L, Y>::visitLeaf(Func f) const {
VisitLeaf<L, Y> visit(f);
visit(root_);
}

/****************************************************************************/
/**
* Functor performing depth-first visit to each leaf with the leaf's
* `Assignment<L>` and value passed as arguments.
*
* NOTE: Follows the same pruning semantics as `visit`.
*/
template <typename L, typename Y>
struct VisitWith {
using Choices = Assignment<L>;
using F = std::function<void(const Choices&, const Y&)>;
using F = std::function<void(const Assignment<L>&, const Y&)>;
explicit VisitWith(F f) : f(f) {} ///< Construct from folding function.
Choices choices; ///< Assignment, mutating through recursion.
F f; ///< folding function object.
Assignment<L> assignment; ///< Assignment, mutating through recursion.
F f; ///< folding function object.

/// Do a depth-first visit on the tree rooted at node.
void operator()(const typename DecisionTree<L, Y>::NodePtr& node) {
using Leaf = typename DecisionTree<L, Y>::Leaf;
if (auto leaf = boost::dynamic_pointer_cast<const Leaf>(node))
return f(choices, leaf->constant());
return f(assignment, leaf->constant());

using Choice = typename DecisionTree<L, Y>::Choice;
auto choice = boost::dynamic_pointer_cast<const Choice>(node);
if (!choice)
throw std::invalid_argument("DecisionTree::VisitWith: Invalid NodePtr");
for (size_t i = 0; i < choice->nrChoices(); i++) {
choices[choice->label()] = i; // Set assignment for label to i
assignment[choice->label()] = i; // Set assignment for label to i

(*this)(choice->branches()[i]); // recurse!

// Remove the choice so we are backtracking
auto choice_it = assignment.find(choice->label());
assignment.erase(choice_it);
}
}
};
@ -679,6 +797,14 @@ namespace gtsam {
visit(root_);
}

/****************************************************************************/
template <typename L, typename Y>
size_t DecisionTree<L, Y>::nrLeaves() const {
size_t total = 0;
visit([&total](const Y& node) { total += 1; });
return total;
}

/****************************************************************************/
// fold is just done with a visit
template <typename L, typename Y>
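Aside: a rough illustration (not part of this diff) of the leaf-vs-assignment distinction that the new nrLeaves() exposes, using the string/int instantiation from the unit tests.

#include <gtsam/discrete/DecisionTree-inl.h>

#include <cassert>
#include <string>
#include <utility>

int main() {
  using Tree = gtsam::DecisionTree<std::string, int>;
  std::pair<std::string, size_t> A("A", 2), B("B", 2);

  // Two binary variables with every assignment equal to 1: pruning in
  // Choice::Unique collapses the whole tree into a single leaf.
  Tree tree({A, B}, "1 1 1 1");

  assert(tree.nrLeaves() == 1);  // one leaf ...
  // ... even though it stands for 2^2 = 4 assignments.
  return 0;
}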
@ -689,12 +815,26 @@ namespace gtsam {
}

/****************************************************************************/
// labels is just done with a visit
/**
* Get (partial) labels by performing a visit.
*
* This method performs a depth-first search to go to every leaf and records
* the keys assignment which leads to that leaf. Since the tree can be pruned,
* there might be a leaf at a lower depth which results in a partial
* assignment (i.e. not all keys are specified).
*
* E.g. given a tree with 3 keys, there may be a branch where the 3rd key has
* the same values for all the leaves. This leads to the branch being pruned
* so we get a leaf which is arrived at by just the first 2 keys and their
* assignments.
*/
template <typename L, typename Y>
std::set<L> DecisionTree<L, Y>::labels() const {
std::set<L> unique;
auto f = [&](const Assignment<L>& choices, const Y&) {
for (auto&& kv : choices) unique.insert(kv.first);
auto f = [&](const Assignment<L>& assignment, const Y&) {
for (auto&& kv : assignment) {
unique.insert(kv.first);
}
};
visitWith(f);
return unique;
@ -734,6 +874,19 @@ namespace gtsam {
return DecisionTree(root_->apply(op));
}

/// Apply unary operator with assignment
template <typename L, typename Y>
DecisionTree<L, Y> DecisionTree<L, Y>::apply(
const UnaryAssignment& op) const {
// It is unclear what should happen if tree is empty:
if (empty()) {
throw std::runtime_error(
"DecisionTree::apply(unary op) undefined for empty tree.");
}
Assignment<L> assignment;
return DecisionTree(root_->apply(op, assignment));
}

/****************************************************************************/
template<typename L, typename Y>
DecisionTree<L, Y> DecisionTree<L, Y>::apply(const DecisionTree& g,
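Aside: a rough sketch (not part of this diff) of the new assignment-aware apply(), again on the string/int tree used in the tests. The op sees the (possibly partial) assignment that leads to each leaf together with the leaf value.

#include <gtsam/discrete/DecisionTree-inl.h>

#include <string>
#include <utility>

int main() {
  using Tree = gtsam::DecisionTree<std::string, int>;
  std::pair<std::string, size_t> A("A", 2), B("B", 2);
  Tree tree({A, B}, "0 1 2 3");

  // Zero out every leaf reached with A == 0 and keep the rest unchanged.
  Tree::UnaryAssignment op =
      [](const gtsam::Assignment<std::string>& assignment, const int& y) {
        auto it = assignment.find("A");
        return (it != assignment.end() && it->second == 0) ? 0 : y;
      };
  Tree masked = tree.apply(op);  // calls the UnaryAssignment overload above
  return 0;
}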
@ -54,6 +54,7 @@ namespace gtsam {

/** Handy typedefs for unary and binary function types */
using Unary = std::function<Y(const Y&)>;
using UnaryAssignment = std::function<Y(const Assignment<L>&, const Y&)>;
using Binary = std::function<Y(const Y&, const Y&)>;

/** A label annotated with cardinality */
@ -103,6 +104,8 @@ namespace gtsam {
&DefaultCompare) const = 0;
virtual const Y& operator()(const Assignment<L>& x) const = 0;
virtual Ptr apply(const Unary& op) const = 0;
virtual Ptr apply(const UnaryAssignment& op,
const Assignment<L>& assignment) const = 0;
virtual Ptr apply_f_op_g(const Node&, const Binary&) const = 0;
virtual Ptr apply_g_op_fL(const Leaf&, const Binary&) const = 0;
virtual Ptr apply_g_op_fC(const Choice&, const Binary&) const = 0;
@ -150,7 +153,7 @@ namespace gtsam {
/** Create a constant */
explicit DecisionTree(const Y& y);

/** Create a new leaf function splitting on a variable */
/// Create tree with 2 assignments `y1`, `y2`, splitting on variable `label`
DecisionTree(const L& label, const Y& y1, const Y& y2);

/** Allow Label+Cardinality for convenience */
@ -216,9 +219,8 @@ namespace gtsam {
/// @name Standard Interface
/// @{

/** Make virtual */
virtual ~DecisionTree() {
}
/// Make virtual
virtual ~DecisionTree() {}

/// Check if tree is empty.
bool empty() const { return !root_; }
@ -231,11 +233,13 @@ namespace gtsam {

/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f side-effect taking a value.
*
* @note Due to pruning, leaves might not exhaust choices.
*
*
* @param f (side-effect) Function taking a value.
*
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
* auto visitor = [&](int y) { sum += y; };
@ -246,19 +250,41 @@ namespace gtsam {

/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f side-effect taking an assignment and a value.
*
* @note Due to pruning, leaves might not exhaust choices.
*
*
* @param f (side-effect) Function taking the leaf node pointer.
*
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
* auto visitor = [&](const Assignment<L>& choices, int y) { sum += y; };
* auto visitor = [&](int y) { sum += y; };
* tree.visitWith(visitor);
*/
template <typename Func>
void visitLeaf(Func f) const;

/**
* @brief Visit all leaves in depth-first fashion.
*
* @param f (side-effect) Function taking an assignment and a value.
*
* @note Due to pruning, the number of leaves may not be the same as the
* number of assignments. E.g. if we have a tree on 2 binary variables with
* all values being 1, then there are 2^2=4 assignments, but only 1 leaf.
*
* Example:
* int sum = 0;
* auto visitor = [&](const Assignment<L>& assignment, int y) { sum += y; };
* tree.visitWith(visitor);
*/
template <typename Func>
void visitWith(Func f) const;

/// Return the number of leaves in the tree.
size_t nrLeaves() const;

/**
* @brief Fold a binary function over the tree, returning accumulator.
*
@ -269,7 +295,7 @@ namespace gtsam {
*
* @note X is always passed by value.
* @note Due to pruning, leaves might not exhaust choices.
*
*
* Example:
* auto add = [](const double& y, double x) { return y + x; };
* double sum = tree.fold(add, 0.0);
@ -283,6 +309,16 @@ namespace gtsam {
/** apply Unary operation "op" to f */
DecisionTree apply(const Unary& op) const;

/**
* @brief Apply Unary operation "op" to f while also providing the
* corresponding assignment.
*
* @param op Function which takes Assignment<L> and Y as input and returns
* object of type Y.
* @return DecisionTree
*/
DecisionTree apply(const UnaryAssignment& op) const;

/** apply binary operation "op" to f and g */
DecisionTree apply(const DecisionTree& g, const Binary& op) const;

@ -337,6 +373,13 @@ namespace gtsam {
return f.apply(op);
}

/// Apply unary operator `op` with Assignment to DecisionTree `f`.
template<typename L, typename Y>
DecisionTree<L, Y> apply(const DecisionTree<L, Y>& f,
const typename DecisionTree<L, Y>::UnaryAssignment& op) {
return f.apply(op);
}

/// Apply binary operator `op` to DecisionTree `f`.
template<typename L, typename Y>
DecisionTree<L, Y> apply(const DecisionTree<L, Y>& f,
@ -156,10 +156,7 @@ namespace gtsam {
std::vector<std::pair<DiscreteValues, double>> DecisionTreeFactor::enumerate()
const {
// Get all possible assignments
std::vector<std::pair<Key, size_t>> pairs;
for (auto& key : keys()) {
pairs.emplace_back(key, cardinalities_.at(key));
}
std::vector<std::pair<Key, size_t>> pairs = discreteKeys();
// Reverse to make cartesian product output a more natural ordering.
std::vector<std::pair<Key, size_t>> rpairs(pairs.rbegin(), pairs.rend());
const auto assignments = DiscreteValues::CartesianProduct(rpairs);
@ -289,5 +286,43 @@ namespace gtsam {
AlgebraicDecisionTree<Key>(keys, table),
cardinalities_(keys.cardinalities()) {}

/* ************************************************************************ */
DecisionTreeFactor DecisionTreeFactor::prune(size_t maxNrAssignments) const {
const size_t N = maxNrAssignments;

// Get the probabilities in the decision tree so we can threshold.
std::vector<double> probabilities;
this->visitLeaf([&](const Leaf& leaf) {
size_t nrAssignments = leaf.nrAssignments();
double prob = leaf.constant();
probabilities.insert(probabilities.end(), nrAssignments, prob);
});

// The number of probabilities can be lower than max_leaves
if (probabilities.size() <= N) {
return *this;
}

std::sort(probabilities.begin(), probabilities.end(),
std::greater<double>{});

double threshold = probabilities[N - 1];

// Now threshold the decision tree
size_t total = 0;
auto thresholdFunc = [threshold, &total, N](const double& value) {
if (value < threshold || total >= N) {
return 0.0;
} else {
total += 1;
return value;
}
};
DecisionTree<Key, double> thresholded(*this, thresholdFunc);

// Create pruned decision tree factor and return.
return DecisionTreeFactor(this->discreteKeys(), thresholded);
}

/* ************************************************************************ */
} // namespace gtsam
@ -170,6 +170,26 @@ namespace gtsam {
/// Return all the discrete keys associated with this factor.
DiscreteKeys discreteKeys() const;

/**
* @brief Prune the decision tree of discrete variables.
*
* Pruning will set the leaves to be "pruned" to 0 indicating a 0
* probability. An assignment is pruned if it is not in the top
* `maxNrAssignments` values.
*
* A violation can occur if there are more
* duplicate values than `maxNrAssignments`. A violation here is the need to
* un-prune the decision tree (e.g. all assignment values are 1.0). We could
* have another case where some subset of duplicates exist (e.g. for a tree
* with 8 assignments we have 1, 1, 1, 1, 0.8, 0.7, 0.6, 0.5), but this is
* not a violation since the for `maxNrAssignments=5` the top values are (1,
* 0.8).
*
* @param maxNrAssignments The maximum number of assignments to keep.
* @return DecisionTreeFactor
*/
DecisionTreeFactor prune(size_t maxNrAssignments) const;

/// @}
/// @name Wrapper support
/// @{
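Aside: a rough usage sketch for the new prune() (not part of this diff; the keys and table values are made up). Only the two most probable assignments survive; everything else is set to zero.

#include <gtsam/discrete/DecisionTreeFactor.h>

int main() {
  using namespace gtsam;
  DiscreteKey A(1, 2), B(2, 2);  // two binary variables
  DecisionTreeFactor f(A & B, "0.1 0.4 0.2 0.3");

  // Keep the top maxNrAssignments = 2 values (0.4 and 0.3), zero out the rest.
  DecisionTreeFactor pruned = f.prune(2);
  return 0;
}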
@ -225,13 +225,13 @@ DecisionTreeFactor::shared_ptr DiscreteConditional::likelihood(

/* ****************************************************************************/
DecisionTreeFactor::shared_ptr DiscreteConditional::likelihood(
size_t parent_value) const {
size_t frontal) const {
if (nrFrontals() != 1)
throw std::invalid_argument(
"Single value likelihood can only be invoked on single-variable "
"conditional");
DiscreteValues values;
values.emplace(keys_[0], parent_value);
values.emplace(keys_[0], frontal);
return likelihood(values);
}

@ -177,7 +177,7 @@ class GTSAM_EXPORT DiscreteConditional
const DiscreteValues& frontalValues) const;

/** Single variable version of likelihood. */
DecisionTreeFactor::shared_ptr likelihood(size_t parent_value) const;
DecisionTreeFactor::shared_ptr likelihood(size_t frontal) const;

/**
* sample
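Aside: a rough sketch (not part of this diff) to make the renamed argument concrete: the value fixes the single frontal variable, and the returned factor is a likelihood over the parent. The keys and signature string are arbitrary.

#include <gtsam/discrete/DiscreteConditional.h>

int main() {
  using namespace gtsam;
  DiscreteKey X(0, 2), Y(1, 2);
  DiscreteConditional pXgivenY(X | Y = "4/1 1/4");  // P(X | Y)

  // Fix the frontal X to the value 1; the result is a factor on Y only.
  DecisionTreeFactor::shared_ptr likelihood = pXgivenY.likelihood(1);
  return 0;
}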
@ -16,6 +16,8 @@
* @author Richard Roberts
*/

#pragma once

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteBayesTree.h>
#include <gtsam/inference/JunctionTree.h>
@ -72,5 +72,5 @@ namespace gtsam {
}; // DiscreteKeys

/// Create a list from two keys
DiscreteKeys operator&(const DiscreteKey& key1, const DiscreteKey& key2);
GTSAM_EXPORT DiscreteKeys operator&(const DiscreteKey& key1, const DiscreteKey& key2);
}
@ -36,7 +36,7 @@ class DiscreteBayesNet;
* Inherits from discrete conditional for convenience, but is not normalized.
* Is used in the max-product algorithm.
*/
class DiscreteLookupTable : public DiscreteConditional {
class GTSAM_EXPORT DiscreteLookupTable : public DiscreteConditional {
public:
using This = DiscreteLookupTable;
using shared_ptr = boost::shared_ptr<This>;
@ -46,7 +46,7 @@ class DiscreteLookupTable : public DiscreteConditional {
* @brief Construct a new Discrete Lookup Table object
*
* @param nFrontals number of frontal variables
* @param keys a orted list of gtsam::Keys
* @param keys a sorted list of gtsam::Keys
* @param potentials the algebraic decision tree with lookup values
*/
DiscreteLookupTable(size_t nFrontals, const DiscreteKeys& keys,
@ -29,7 +29,7 @@ namespace gtsam {
/**
* A class for computing marginals of variables in a DiscreteFactorGraph
*/
class GTSAM_EXPORT DiscreteMarginals {
class DiscreteMarginals {

protected:

@ -37,7 +37,7 @@ namespace gtsam {
* stores cardinality of a Discrete variable. It should be handled naturally in
* the new class DiscreteValue, as the variable's type (domain)
*/
class DiscreteValues : public Assignment<Key> {
class GTSAM_EXPORT DiscreteValues : public Assignment<Key> {
public:
using Base = Assignment<Key>; // base class

@ -70,7 +70,7 @@ virtual class DecisionTreeFactor : gtsam::DiscreteFactor {
string dot(
const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter,
bool showZero = true) const;
std::vector<std::pair<DiscreteValues, double>> enumerate() const;
std::vector<std::pair<gtsam::DiscreteValues, double>> enumerate() const;
string markdown(const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
string markdown(const gtsam::KeyFormatter& keyFormatter,
@ -97,7 +97,7 @@ virtual class DiscreteConditional : gtsam::DecisionTreeFactor {
const gtsam::Ordering& orderedKeys);
gtsam::DiscreteConditional operator*(
const gtsam::DiscreteConditional& other) const;
DiscreteConditional marginal(gtsam::Key key) const;
gtsam::DiscreteConditional marginal(gtsam::Key key) const;
void print(string s = "Discrete Conditional\n",
const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
@ -269,13 +269,16 @@ class DiscreteFactorGraph {
gtsam::DiscreteLookupDAG maxProduct(gtsam::Ordering::OrderingType type);
gtsam::DiscreteLookupDAG maxProduct(const gtsam::Ordering& ordering);

gtsam::DiscreteBayesNet eliminateSequential();
gtsam::DiscreteBayesNet eliminateSequential(const gtsam::Ordering& ordering);
std::pair<gtsam::DiscreteBayesNet, gtsam::DiscreteFactorGraph>
gtsam::DiscreteBayesNet* eliminateSequential();
gtsam::DiscreteBayesNet* eliminateSequential(gtsam::Ordering::OrderingType type);
gtsam::DiscreteBayesNet* eliminateSequential(const gtsam::Ordering& ordering);
pair<gtsam::DiscreteBayesNet*, gtsam::DiscreteFactorGraph*>
eliminatePartialSequential(const gtsam::Ordering& ordering);
gtsam::DiscreteBayesTree eliminateMultifrontal();
gtsam::DiscreteBayesTree eliminateMultifrontal(const gtsam::Ordering& ordering);
std::pair<gtsam::DiscreteBayesTree, gtsam::DiscreteFactorGraph>

gtsam::DiscreteBayesTree* eliminateMultifrontal();
gtsam::DiscreteBayesTree* eliminateMultifrontal(gtsam::Ordering::OrderingType type);
gtsam::DiscreteBayesTree* eliminateMultifrontal(const gtsam::Ordering& ordering);
pair<gtsam::DiscreteBayesTree*, gtsam::DiscreteFactorGraph*>
eliminatePartialMultifrontal(const gtsam::Ordering& ordering);

string dot(
@ -20,7 +20,7 @@
#include <gtsam/discrete/DiscreteKey.h> // make sure we have traits
#include <gtsam/discrete/DiscreteValues.h>
// headers first to make sure no missing headers
//#define DT_NO_PRUNING
//#define GTSAM_DT_NO_PRUNING
#include <gtsam/discrete/AlgebraicDecisionTree.h>
#include <gtsam/discrete/DecisionTree-inl.h> // for convert only
#define DISABLE_TIMING
@ -318,7 +318,7 @@ TEST(ADT, factor_graph) {
dot(fg, "Marginalized-3E");
fg = fg.combine(L, &add_);
dot(fg, "Marginalized-2L");
EXPECT(adds = 54);
LONGS_EQUAL(49, adds);
gttoc_(marg);
tictoc_getNode(margNode, marg);
elapsed = margNode->secs() + margNode->wall();
@ -17,17 +17,19 @@
* @date Jan 30, 2012
*/

#include <boost/assign/std/vector.hpp>
using namespace boost::assign;
// #define DT_DEBUG_MEMORY
// #define GTSAM_DT_NO_PRUNING
#define DISABLE_DOT
#include <gtsam/discrete/DecisionTree-inl.h>

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/discrete/Signature.h>

// #define DT_DEBUG_MEMORY
// #define DT_NO_PRUNING
#define DISABLE_DOT
#include <gtsam/discrete/DecisionTree-inl.h>
#include <CppUnitLite/TestHarness.h>

#include <boost/assign/std/vector.hpp>
using namespace boost::assign;

using namespace std;
using namespace gtsam;

@ -88,6 +90,7 @@ struct DT : public DecisionTree<string, int> {
auto valueFormatter = [](const int& v) {
return (boost::format("%d") % v).str();
};
std::cout << s;
Base::print("", keyFormatter, valueFormatter);
}
/// Equality method customized to int node type
@ -148,9 +151,9 @@ TEST(DecisionTree, example) {
DOT(notb);

// Check supplying empty trees yields an exception
CHECK_EXCEPTION(apply(empty, &Ring::id), std::runtime_error);
CHECK_EXCEPTION(apply(empty, a, &Ring::mul), std::runtime_error);
CHECK_EXCEPTION(apply(a, empty, &Ring::mul), std::runtime_error);
CHECK_EXCEPTION(gtsam::apply(empty, &Ring::id), std::runtime_error);
CHECK_EXCEPTION(gtsam::apply(empty, a, &Ring::mul), std::runtime_error);
CHECK_EXCEPTION(gtsam::apply(a, empty, &Ring::mul), std::runtime_error);

// apply, two nodes, in natural order
DT anotb = apply(a, notb, &Ring::mul);
@ -320,6 +323,49 @@ TEST(DecisionTree, Containers) {
StringContainerTree converted(stringIntTree, container_of_int);
}

/* ************************************************************************** */
// Test nrAssignments.
TEST(DecisionTree, NrAssignments) {
pair<string, size_t> A("A", 2), B("B", 2), C("C", 2);
DT tree({A, B, C}, "1 1 1 1 1 1 1 1");
EXPECT(tree.root_->isLeaf());
auto leaf = boost::dynamic_pointer_cast<const DT::Leaf>(tree.root_);
EXPECT_LONGS_EQUAL(8, leaf->nrAssignments());

DT tree2({C, B, A}, "1 1 1 2 3 4 5 5");
/* The tree is
Choice(C)
0 Choice(B)
0 0 Leaf 1
0 1 Choice(A)
0 1 0 Leaf 1
0 1 1 Leaf 2
1 Choice(B)
1 0 Choice(A)
1 0 0 Leaf 3
1 0 1 Leaf 4
1 1 Leaf 5
*/

auto root = boost::dynamic_pointer_cast<const DT::Choice>(tree2.root_);
CHECK(root);
auto choice0 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[0]);
CHECK(choice0);
EXPECT(choice0->branches()[0]->isLeaf());
auto choice00 = boost::dynamic_pointer_cast<const DT::Leaf>(choice0->branches()[0]);
CHECK(choice00);
EXPECT_LONGS_EQUAL(2, choice00->nrAssignments());

auto choice1 = boost::dynamic_pointer_cast<const DT::Choice>(root->branches()[1]);
CHECK(choice1);
auto choice10 = boost::dynamic_pointer_cast<const DT::Choice>(choice1->branches()[0]);
CHECK(choice10);
auto choice11 = boost::dynamic_pointer_cast<const DT::Leaf>(choice1->branches()[1]);
CHECK(choice11);
EXPECT(choice11->isLeaf());
EXPECT_LONGS_EQUAL(2, choice11->nrAssignments());
}

/* ************************************************************************** */
// Test visit.
TEST(DecisionTree, visit) {
|
@ -344,6 +390,44 @@ TEST(DecisionTree, visitWith) {
|
|||
EXPECT_DOUBLES_EQUAL(6.0, sum, 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************** */
|
||||
// Test visit, with Choices argument.
|
||||
TEST(DecisionTree, VisitWithPruned) {
|
||||
// Create pruned tree
|
||||
std::pair<string, size_t> A("A", 2), B("B", 2), C("C", 2);
|
||||
std::vector<std::pair<string, size_t>> labels = {C, B, A};
|
||||
std::vector<int> nodes = {0, 0, 2, 3, 4, 4, 6, 7};
|
||||
DT tree(labels, nodes);
|
||||
|
||||
std::vector<Assignment<string>> choices;
|
||||
auto func = [&](const Assignment<string>& choice, const int& d) {
|
||||
choices.push_back(choice);
|
||||
};
|
||||
tree.visitWith(func);
|
||||
|
||||
EXPECT_LONGS_EQUAL(6, choices.size());
|
||||
|
||||
Assignment<string> expectedAssignment;
|
||||
|
||||
expectedAssignment = {{"B", 0}, {"C", 0}};
|
||||
EXPECT(expectedAssignment == choices.at(0));
|
||||
|
||||
expectedAssignment = {{"A", 0}, {"B", 1}, {"C", 0}};
|
||||
EXPECT(expectedAssignment == choices.at(1));
|
||||
|
||||
expectedAssignment = {{"A", 1}, {"B", 1}, {"C", 0}};
|
||||
EXPECT(expectedAssignment == choices.at(2));
|
||||
|
||||
expectedAssignment = {{"B", 0}, {"C", 1}};
|
||||
EXPECT(expectedAssignment == choices.at(3));
|
||||
|
||||
expectedAssignment = {{"A", 0}, {"B", 1}, {"C", 1}};
|
||||
EXPECT(expectedAssignment == choices.at(4));
|
||||
|
||||
expectedAssignment = {{"A", 1}, {"B", 1}, {"C", 1}};
|
||||
EXPECT(expectedAssignment == choices.at(5));
|
||||
}
|
||||
|
||||
/* ************************************************************************** */
|
||||
// Test fold.
|
||||
TEST(DecisionTree, fold) {
|
||||
|
@ -411,6 +495,43 @@ TEST(DecisionTree, threshold) {
|
|||
EXPECT_LONGS_EQUAL(2, thresholded.fold(count, 0));
|
||||
}
|
||||
|
||||
/* ************************************************************************** */
|
||||
// Test apply with assignment.
|
||||
TEST(DecisionTree, ApplyWithAssignment) {
|
||||
// Create three level tree
|
||||
vector<DT::LabelC> keys;
|
||||
keys += DT::LabelC("C", 2), DT::LabelC("B", 2), DT::LabelC("A", 2);
|
||||
DT tree(keys, "1 2 3 4 5 6 7 8");
|
||||
|
||||
DecisionTree<string, double> probTree(
|
||||
keys, "0.01 0.02 0.03 0.04 0.05 0.06 0.07 0.08");
|
||||
double threshold = 0.045;
|
||||
|
||||
// We test pruning one tree by indexing into another.
|
||||
auto pruner = [&](const Assignment<string>& choices, const int& x) {
|
||||
// Prune out all the leaves with even numbers
|
||||
if (probTree(choices) < threshold) {
|
||||
return 0;
|
||||
} else {
|
||||
return x;
|
||||
}
|
||||
};
|
||||
DT prunedTree = tree.apply(pruner);
|
||||
|
||||
DT expectedTree(keys, "0 0 0 0 5 6 7 8");
|
||||
EXPECT(assert_equal(expectedTree, prunedTree));
|
||||
|
||||
size_t count = 0;
|
||||
auto counter = [&](const Assignment<string>& choices, const int& x) {
|
||||
count += 1;
|
||||
return x;
|
||||
};
|
||||
DT prunedTree2 = prunedTree.apply(counter);
|
||||
|
||||
// Check if apply doesn't enumerate all leaves.
|
||||
EXPECT_LONGS_EQUAL(5, count);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
|
|
|
@ -107,12 +107,47 @@ TEST(DecisionTreeFactor, enumerate) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DiscreteFactorGraph, DotWithNames) {
|
||||
// Check pruning of the decision tree works as expected.
|
||||
TEST(DecisionTreeFactor, Prune) {
|
||||
DiscreteKey A(1, 2), B(2, 2), C(3, 2);
|
||||
DecisionTreeFactor f(A & B & C, "1 5 3 7 2 6 4 8");
|
||||
|
||||
// Only keep the leaves with the top 5 values.
|
||||
size_t maxNrAssignments = 5;
|
||||
auto pruned5 = f.prune(maxNrAssignments);
|
||||
|
||||
// Pruned leaves should be 0
|
||||
DecisionTreeFactor expected(A & B & C, "0 5 0 7 0 6 4 8");
|
||||
EXPECT(assert_equal(expected, pruned5));
|
||||
|
||||
// Check for more extreme pruning where we only keep the top 2 leaves
|
||||
maxNrAssignments = 2;
|
||||
auto pruned2 = f.prune(maxNrAssignments);
|
||||
DecisionTreeFactor expected2(A & B & C, "0 0 0 7 0 0 0 8");
|
||||
EXPECT(assert_equal(expected2, pruned2));
|
||||
|
||||
DiscreteKey D(4, 2);
|
||||
DecisionTreeFactor factor(
|
||||
D & C & B & A,
|
||||
"0.0 0.0 0.0 0.60658897 0.61241912 0.61241969 0.61247685 0.61247742 0.0 "
|
||||
"0.0 0.0 0.99995287 1.0 1.0 1.0 1.0");
|
||||
|
||||
DecisionTreeFactor expected3(
|
||||
D & C & B & A,
|
||||
"0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 "
|
||||
"0.999952870000 1.0 1.0 1.0 1.0");
|
||||
maxNrAssignments = 5;
|
||||
auto pruned3 = factor.prune(maxNrAssignments);
|
||||
EXPECT(assert_equal(expected3, pruned3));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(DecisionTreeFactor, DotWithNames) {
|
||||
DiscreteKey A(12, 3), B(5, 2);
|
||||
DecisionTreeFactor f(A & B, "1 2 3 4 5 6");
|
||||
auto formatter = [](Key key) { return key == 12 ? "A" : "B"; };
|
||||
|
||||
for (bool showZero:{true, false}) {
|
||||
for (bool showZero:{true, false}) {
|
||||
string actual = f.dot(formatter, showZero);
|
||||
// pretty weak test, as ids are pointers and not stable across platforms.
|
||||
string expected = "digraph G {";
|
||||
|
@ -194,4 +229,3 @@ int main() {
|
|||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
||||
|
|
|
@ -150,7 +150,6 @@ TEST(DiscreteBayesNet, Dot) {
|
|||
fragment.add((Either | Tuberculosis, LungCancer) = "F T T T");
|
||||
|
||||
string actual = fragment.dot();
|
||||
cout << actual << endl;
|
||||
EXPECT(actual ==
|
||||
"digraph {\n"
|
||||
" size=\"5,5\";\n"
|
||||
|
|
|
@ -243,27 +243,27 @@ TEST(DiscreteBayesTree, Dot) {
|
|||
string actual = self.bayesTree->dot();
|
||||
EXPECT(actual ==
|
||||
"digraph G{\n"
|
||||
"0[label=\"13,11,6,7\"];\n"
|
||||
"0[label=\"13, 11, 6, 7\"];\n"
|
||||
"0->1\n"
|
||||
"1[label=\"14 : 11,13\"];\n"
|
||||
"1[label=\"14 : 11, 13\"];\n"
|
||||
"1->2\n"
|
||||
"2[label=\"9,12 : 14\"];\n"
|
||||
"2[label=\"9, 12 : 14\"];\n"
|
||||
"2->3\n"
|
||||
"3[label=\"3 : 9,12\"];\n"
|
||||
"3[label=\"3 : 9, 12\"];\n"
|
||||
"2->4\n"
|
||||
"4[label=\"2 : 9,12\"];\n"
|
||||
"4[label=\"2 : 9, 12\"];\n"
|
||||
"2->5\n"
|
||||
"5[label=\"8 : 12,14\"];\n"
|
||||
"5[label=\"8 : 12, 14\"];\n"
|
||||
"5->6\n"
|
||||
"6[label=\"1 : 8,12\"];\n"
|
||||
"6[label=\"1 : 8, 12\"];\n"
|
||||
"5->7\n"
|
||||
"7[label=\"0 : 8,12\"];\n"
|
||||
"7[label=\"0 : 8, 12\"];\n"
|
||||
"1->8\n"
|
||||
"8[label=\"10 : 13,14\"];\n"
|
||||
"8[label=\"10 : 13, 14\"];\n"
|
||||
"8->9\n"
|
||||
"9[label=\"5 : 10,13\"];\n"
|
||||
"9[label=\"5 : 10, 13\"];\n"
|
||||
"8->10\n"
|
||||
"10[label=\"4 : 10,13\"];\n"
|
||||
"10[label=\"4 : 10, 13\"];\n"
|
||||
"}");
|
||||
}
|
||||
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/OptionalJacobian.h>
|
||||
#include <boost/concept/assert.hpp>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <iostream>
|
||||
|
||||
namespace gtsam {
|
||||
|
|
|
@ -41,6 +41,9 @@ class GTSAM_EXPORT Cal3Bundler : public Cal3 {
|
|||
public:
|
||||
enum { dimension = 3 };
|
||||
|
||||
///< shared pointer to stereo calibration object
|
||||
using shared_ptr = boost::shared_ptr<Cal3Bundler>;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/Cal3DS2_Base.h>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -37,6 +38,9 @@ class GTSAM_EXPORT Cal3DS2 : public Cal3DS2_Base {
|
|||
public:
|
||||
enum { dimension = 9 };
|
||||
|
||||
///< shared pointer to stereo calibration object
|
||||
using shared_ptr = boost::shared_ptr<Cal3DS2>;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
|
||||
#include <gtsam/geometry/Cal3.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
@ -47,6 +48,9 @@ class GTSAM_EXPORT Cal3DS2_Base : public Cal3 {
|
|||
public:
|
||||
enum { dimension = 9 };
|
||||
|
||||
///< shared pointer to stereo calibration object
|
||||
using shared_ptr = boost::shared_ptr<Cal3DS2_Base>;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
#include <gtsam/geometry/Cal3.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <string>
|
||||
|
||||
namespace gtsam {
|
||||
|
|
|
@ -52,6 +52,9 @@ class GTSAM_EXPORT Cal3Unified : public Cal3DS2_Base {
|
|||
public:
|
||||
enum { dimension = 10 };
|
||||
|
||||
///< shared pointer to stereo calibration object
|
||||
using shared_ptr = boost::shared_ptr<Cal3Unified>;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
|
|
|
@ -15,6 +15,8 @@
|
|||
* @author Frank Dellaert
|
||||
**/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Group.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
|
||||
|
|
|
@ -117,4 +117,4 @@ Line3 transformTo(const Pose3 &wTc, const Line3 &wL,
|
|||
return Line3(cRl, c_ab[0], c_ab[1]);
|
||||
}
|
||||
|
||||
}
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -21,12 +21,27 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
class Line3;
|
||||
|
||||
/**
 * Transform a line from world to camera frame
 * @param wTc - Pose3 of camera in world frame
 * @param wL - Line3 in world frame
 * @param Dpose - OptionalJacobian of transformed line with respect to p
 * @param Dline - OptionalJacobian of transformed line with respect to l
 * @return Transformed line in camera frame
 */
GTSAM_EXPORT Line3 transformTo(const Pose3 &wTc, const Line3 &wL,
                               OptionalJacobian<4, 6> Dpose = boost::none,
                               OptionalJacobian<4, 4> Dline = boost::none);

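A minimal usage sketch of the free function above, assuming the (Rot3, a, b) constructor that the implementation earlier in this diff uses; the pose and line values are illustrative only:

#include <gtsam/geometry/Line3.h>

void lineToCameraFrame() {
  gtsam::Pose3 wTc;                          // camera pose in world frame (identity here)
  gtsam::Line3 wL(gtsam::Rot3(), 1.0, 2.0);  // line expressed in world frame
  Eigen::Matrix<double, 4, 6> Dpose;         // Jacobian w.r.t. the pose
  Eigen::Matrix<double, 4, 4> Dline;         // Jacobian w.r.t. the line
  gtsam::Line3 cL = gtsam::transformTo(wTc, wL, Dpose, Dline);
}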
/**
|
||||
* A 3D line (R,a,b) : (Rot3,Scalar,Scalar)
|
||||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class Line3 {
|
||||
class GTSAM_EXPORT Line3 {
|
||||
private:
|
||||
Rot3 R_; // Rotation of line about x and y in world frame
|
||||
double a_, b_; // Intersection of line with the world x-y plane rotated by R_
|
||||
|
@ -136,18 +151,6 @@ class Line3 {
|
|||
OptionalJacobian<4, 4> Dline);
|
||||
};
|
||||
|
||||
/**
|
||||
* Transform a line from world to camera frame
|
||||
* @param wTc - Pose3 of camera in world frame
|
||||
* @param wL - Line3 in world frame
|
||||
* @param Dpose - OptionalJacobian of transformed line with respect to p
|
||||
* @param Dline - OptionalJacobian of transformed line with respect to l
|
||||
* @return Transformed line in camera frame
|
||||
*/
|
||||
Line3 transformTo(const Pose3 &wTc, const Line3 &wL,
|
||||
OptionalJacobian<4, 6> Dpose = boost::none,
|
||||
OptionalJacobian<4, 4> Dline = boost::none);
|
||||
|
||||
template<>
|
||||
struct traits<Line3> : public internal::Manifold<Line3> {};
|
||||
|
||||
|
|
|
@ -30,7 +30,7 @@ namespace gtsam {
|
|||
* \nosubgrouping
|
||||
*/
|
||||
template<typename Calibration>
|
||||
class GTSAM_EXPORT PinholeCamera: public PinholeBaseK<Calibration> {
|
||||
class PinholeCamera: public PinholeBaseK<Calibration> {
|
||||
|
||||
public:
|
||||
|
||||
|
@ -230,13 +230,15 @@ public:
|
|||
Point2 _project2(const POINT& pw, OptionalJacobian<2, dimension> Dcamera,
|
||||
OptionalJacobian<2, FixedDimension<POINT>::value> Dpoint) const {
|
||||
// We just call 3-derivative version in Base
|
||||
Matrix26 Dpose;
|
||||
Eigen::Matrix<double, 2, DimK> Dcal;
|
||||
Point2 pi = Base::project(pw, Dcamera ? &Dpose : 0, Dpoint,
|
||||
Dcamera ? &Dcal : 0);
|
||||
if (Dcamera)
|
||||
if (Dcamera){
|
||||
Matrix26 Dpose;
|
||||
Eigen::Matrix<double, 2, DimK> Dcal;
|
||||
const Point2 pi = Base::project(pw, Dpose, Dpoint, Dcal);
|
||||
*Dcamera << Dpose, Dcal;
|
||||
return pi;
|
||||
return pi;
|
||||
} else {
|
||||
return Base::project(pw, boost::none, Dpoint, boost::none);
|
||||
}
|
||||
}
|
||||
|
||||
/// project a 3D point from world coordinates into the image
|
||||
|
|
|
@ -31,7 +31,7 @@ namespace gtsam {
|
|||
* \nosubgrouping
|
||||
*/
|
||||
template<typename CALIBRATION>
|
||||
class GTSAM_EXPORT PinholeBaseK: public PinholeBase {
|
||||
class PinholeBaseK: public PinholeBase {
|
||||
|
||||
private:
|
||||
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <gtsam/geometry/Point3.h>
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
||||
|
|
|
@ -213,6 +213,14 @@ Point2 Pose2::transformTo(const Point2& point,
|
|||
return q;
|
||||
}
|
||||
|
||||
Matrix Pose2::transformTo(const Matrix& points) const {
|
||||
if (points.rows() != 2) {
|
||||
throw std::invalid_argument("Pose2:transformTo expects 2*N matrix.");
|
||||
}
|
||||
const Matrix2 Rt = rotation().transpose();
|
||||
return Rt * (points.colwise() - t_); // Eigen broadcasting!
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// see doc/math.lyx, SE(2) section
|
||||
Point2 Pose2::transformFrom(const Point2& point,
|
||||
|
@ -224,6 +232,15 @@ Point2 Pose2::transformFrom(const Point2& point,
|
|||
return q + t_;
|
||||
}
|
||||
|
||||
|
||||
Matrix Pose2::transformFrom(const Matrix& points) const {
  if (points.rows() != 2) {
    throw std::invalid_argument("Pose2:transformFrom expects 2*N matrix.");
  }
  const Matrix2 R = rotation().matrix();
  return (R * points).colwise() + t_;  // Eigen broadcasting!
}
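A hedged sketch of the new batched overloads (point values invented): each column of the 2*N input is transformed in a single Eigen expression, and transformTo undoes transformFrom.

#include <gtsam/geometry/Pose2.h>
#include <cmath>

void batchedPose2Transforms() {
  gtsam::Pose2 wTb(M_PI / 2, gtsam::Point2(1.0, 2.0));  // body pose in world frame
  gtsam::Matrix b(2, 3);                                // three points in the body frame
  b << 0.0, 1.0, 0.0,
       0.0, 0.0, 1.0;
  gtsam::Matrix w = wTb.transformFrom(b);   // 2x3 world points: R*b plus translation
  gtsam::Matrix back = wTb.transformTo(w);  // recovers b up to numerical round-off
}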
|
||||
|
||||
/* ************************************************************************* */
|
||||
Rot2 Pose2::bearing(const Point2& point,
|
||||
OptionalJacobian<1, 3> Hpose, OptionalJacobian<1, 2> Hpoint) const {
|
||||
|
@ -292,54 +309,77 @@ double Pose2::range(const Pose2& pose,
|
|||
}
|
||||
|
||||
/* *************************************************************************
 * New explanation, from scan.ml
 * It finds the angle using a linear method:
 * q = Pose2::transformFrom(p) = t + R*p
 * Align finds the angle using a linear method:
 * a = Pose2::transformFrom(b) = t + R*b
 * We need to remove the centroids from the data to find the rotation
 * using dp=[dpx;dpy] and q=[dqx;dqy] we have
 *  |dqx|   |c  -s|   |dpx|   |dpx -dpy|   |c|
 * using db=[dbx;dby] and a=[dax;day] we have
 *  |dax|   |c  -s|   |dbx|   |dbx -dby|   |c|
 *  |   | = |     | * |   | = |        | * | | = H_i*cs
 *  |dqy|   |s   c|   |dpy|   |dpy  dpx|   |s|
 *  |day|   |s   c|   |dby|   |dby  dbx|   |s|
 * where the Hi are the 2*2 matrices. Then we will minimize the criterion
 * J = \sum_i norm(q_i - H_i * cs)
 * J = \sum_i norm(a_i - H_i * cs)
 * Taking the derivative with respect to cs and setting to zero we have
 * cs = (\sum_i H_i' * q_i)/(\sum H_i'*H_i)
 * cs = (\sum_i H_i' * a_i)/(\sum H_i'*H_i)
 * The hessian is diagonal and just divides by a constant, but this
 * normalization constant is irrelevant, since we take atan2.
 * i.e., cos ~ sum(dpx*dqx + dpy*dqy) and sin ~ sum(-dpy*dqx + dpx*dqy)
 * i.e., cos ~ sum(dbx*dax + dby*day) and sin ~ sum(-dby*dax + dbx*day)
 * The translation is then found from the centroids
 * as they also satisfy cq = t + R*cp, hence t = cq - R*cp
 * as they also satisfy ca = t + R*cb, hence t = ca - R*cb
 */
|
||||
|
||||
boost::optional<Pose2> align(const vector<Point2Pair>& pairs) {
|
||||
|
||||
size_t n = pairs.size();
|
||||
if (n<2) return boost::none; // we need at least two pairs
|
||||
boost::optional<Pose2> Pose2::Align(const Point2Pairs &ab_pairs) {
|
||||
const size_t n = ab_pairs.size();
|
||||
if (n < 2) {
|
||||
return boost::none; // we need at least 2 pairs
|
||||
}
|
||||
|
||||
// calculate centroids
|
||||
Point2 cp(0,0), cq(0,0);
|
||||
for(const Point2Pair& pair: pairs) {
|
||||
cp += pair.first;
|
||||
cq += pair.second;
|
||||
Point2 ca(0, 0), cb(0, 0);
|
||||
for (const Point2Pair& pair : ab_pairs) {
|
||||
ca += pair.first;
|
||||
cb += pair.second;
|
||||
}
|
||||
double f = 1.0/n;
|
||||
cp *= f; cq *= f;
|
||||
const double f = 1.0/n;
|
||||
ca *= f;
|
||||
cb *= f;
|
||||
|
||||
// calculate cos and sin
|
||||
double c=0,s=0;
|
||||
for(const Point2Pair& pair: pairs) {
|
||||
Point2 dp = pair.first - cp;
|
||||
Point2 dq = pair.second - cq;
|
||||
c += dp.x() * dq.x() + dp.y() * dq.y();
|
||||
s += -dp.y() * dq.x() + dp.x() * dq.y();
|
||||
double c = 0, s = 0;
|
||||
for (const Point2Pair& pair : ab_pairs) {
|
||||
Point2 da = pair.first - ca;
|
||||
Point2 db = pair.second - cb;
|
||||
c += db.x() * da.x() + db.y() * da.y();
|
||||
s += -db.y() * da.x() + db.x() * da.y();
|
||||
}
|
||||
|
||||
// calculate angle and translation
|
||||
double theta = atan2(s,c);
|
||||
Rot2 R = Rot2::fromAngle(theta);
|
||||
Point2 t = cq - R*cp;
|
||||
const double theta = atan2(s, c);
|
||||
const Rot2 R = Rot2::fromAngle(theta);
|
||||
const Point2 t = ca - R*cb;
|
||||
return Pose2(R, t);
|
||||
}
|
||||
|
||||
boost::optional<Pose2> Pose2::Align(const Matrix& a, const Matrix& b) {
  if (a.rows() != 2 || b.rows() != 2 || a.cols() != b.cols()) {
    throw std::invalid_argument(
      "Pose2:Align expects 2*N matrices of equal shape.");
  }
  Point2Pairs ab_pairs;
  for (size_t j=0; j < a.cols(); j++) {
    ab_pairs.emplace_back(a.col(j), b.col(j));
  }
  return Pose2::Align(ab_pairs);
}
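To make the new Align API concrete, a hedged sketch with an invented pose and points. Note the argument order: Align takes (a, b) correspondences with a = aTb * b, which is the reverse of the deprecated free function align() shimmed just below.

#include <gtsam/geometry/Pose2.h>
#include <cmath>

void alignPose2Example() {
  gtsam::Pose2 aTb(M_PI / 3, gtsam::Point2(10.0, 10.0));
  gtsam::Matrix b(2, 3);                         // at least two distinct points are needed
  b << 0.0, 10.0, 10.0,
       0.0,  0.0, 10.0;
  const gtsam::Matrix a = aTb.transformFrom(b);  // a_j = aTb * b_j, column-wise
  boost::optional<gtsam::Pose2> estimated = gtsam::Pose2::Align(a, b);
  // For noise-free correspondences, estimated->equals(aTb, 1e-9) should hold.
}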
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
|
||||
boost::optional<Pose2> align(const Point2Pairs& ba_pairs) {
|
||||
Point2Pairs ab_pairs;
|
||||
for (const Point2Pair &baPair : ba_pairs) {
|
||||
ab_pairs.emplace_back(baPair.second, baPair.first);
|
||||
}
|
||||
return Pose2::Align(ab_pairs);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
} // namespace gtsam
|
||||
|
|
|
@ -92,6 +92,18 @@ public:
|
|||
*this = Expmap(v);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create Pose2 by aligning two point pairs
|
||||
* A pose aTb is estimated between pairs (a_point, b_point) such that
|
||||
* a_point = aTb * b_point
|
||||
* Note this allows for noise on the points but in that case the mapping
|
||||
* will not be exact.
|
||||
*/
|
||||
static boost::optional<Pose2> Align(const Point2Pairs& abPointPairs);
|
||||
|
||||
// Version of Pose2::Align that takes 2 matrices.
|
||||
static boost::optional<Pose2> Align(const Matrix& a, const Matrix& b);
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
@ -199,13 +211,29 @@ public:
|
|||
OptionalJacobian<2, 3> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none) const;
|
||||
|
||||
/**
|
||||
* @brief transform many points in world coordinates and transform to Pose.
|
||||
* @param points 2*N matrix in world coordinates
|
||||
* @return points in Pose coordinates, as 2*N Matrix
|
||||
*/
|
||||
Matrix transformTo(const Matrix& points) const;
|
||||
|
||||
/** Return point coordinates in global frame */
|
||||
GTSAM_EXPORT Point2 transformFrom(const Point2& point,
|
||||
OptionalJacobian<2, 3> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none) const;
|
||||
|
||||
/**
|
||||
* @brief transform many points in Pose coordinates and transform to world.
|
||||
* @param points 2*N matrix in Pose coordinates
|
||||
* @return points in world coordinates, as 2*N Matrix
|
||||
*/
|
||||
Matrix transformFrom(const Matrix& points) const;
|
||||
|
||||
/** syntactic sugar for transformFrom */
|
||||
inline Point2 operator*(const Point2& point) const { return transformFrom(point);}
|
||||
inline Point2 operator*(const Point2& point) const {
|
||||
return transformFrom(point);
|
||||
}
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
|
@ -315,12 +343,15 @@ inline Matrix wedge<Pose2>(const Vector& xi) {
|
|||
return Matrix(Pose2::wedge(xi(0),xi(1),xi(2))).eval();
|
||||
}
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
|
||||
/**
|
||||
* @deprecated Use static constructor (with reversed pairs!)
|
||||
* Calculate pose between a vector of 2D point correspondences (p,q)
|
||||
* where q = Pose2::transformFrom(p) = t + R*p
|
||||
*/
|
||||
typedef std::pair<Point2,Point2> Point2Pair;
|
||||
GTSAM_EXPORT boost::optional<Pose2> align(const std::vector<Point2Pair>& pairs);
|
||||
GTSAM_EXPORT boost::optional<Pose2>
|
||||
GTSAM_DEPRECATED align(const Point2Pairs& pairs);
|
||||
#endif
|
||||
|
||||
template <>
|
||||
struct traits<Pose2> : public internal::LieGroup<Pose2> {};
|
||||
|
|
|
@ -59,7 +59,7 @@ Matrix6 Pose3::AdjointMap() const {
  const Matrix3 R = R_.matrix();
  Matrix3 A = skewSymmetric(t_.x(), t_.y(), t_.z()) * R;
  Matrix6 adj;
  adj << R, Z_3x3, A, R;
  adj << R, Z_3x3, A, R;  // Gives [R 0; A R]
  return adj;
}
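Spelled out, the block structure the new comment refers to is the standard SE(3) adjoint, acting on twists ordered (omega, v) as elsewhere in GTSAM, with [t]_x the skew-symmetric matrix of the translation:

\mathrm{Ad}_T \;=\;
\begin{bmatrix} R & 0 \\ [t]_\times R & R \end{bmatrix},
\qquad T = (R, t) \in SE(3),
\qquad \mathrm{Ad}_T \begin{bmatrix} \omega \\ v \end{bmatrix}
      = \begin{bmatrix} R\,\omega \\ [t]_\times R\,\omega + R\,v \end{bmatrix}.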
|
||||
|
||||
|
@ -354,6 +354,14 @@ Point3 Pose3::transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself,
|
|||
return R_ * point + t_;
|
||||
}
|
||||
|
||||
Matrix Pose3::transformFrom(const Matrix& points) const {
|
||||
if (points.rows() != 3) {
|
||||
throw std::invalid_argument("Pose3:transformFrom expects 3*N matrix.");
|
||||
}
|
||||
const Matrix3 R = R_.matrix();
|
||||
return (R * points).colwise() + t_; // Eigen broadcasting!
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself,
|
||||
OptionalJacobian<3, 3> Hpoint) const {
|
||||
|
@ -374,6 +382,14 @@ Point3 Pose3::transformTo(const Point3& point, OptionalJacobian<3, 6> Hself,
|
|||
return q;
|
||||
}
|
||||
|
||||
Matrix Pose3::transformTo(const Matrix& points) const {
|
||||
if (points.rows() != 3) {
|
||||
throw std::invalid_argument("Pose3:transformTo expects 3*N matrix.");
|
||||
}
|
||||
const Matrix3 Rt = R_.transpose();
|
||||
return Rt * (points.colwise() - t_); // Eigen broadcasting!
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> Hself,
|
||||
OptionalJacobian<1, 3> Hpoint) const {
|
||||
|
@ -431,7 +447,7 @@ Unit3 Pose3::bearing(const Pose3& pose, OptionalJacobian<2, 6> Hself,
|
|||
boost::optional<Pose3> Pose3::Align(const Point3Pairs &abPointPairs) {
|
||||
const size_t n = abPointPairs.size();
|
||||
if (n < 3) {
|
||||
return boost::none; // we need at least three pairs
|
||||
return boost::none; // we need at least three pairs
|
||||
}
|
||||
|
||||
// calculate centroids
|
||||
|
@ -451,6 +467,19 @@ boost::optional<Pose3> Pose3::Align(const Point3Pairs &abPointPairs) {
|
|||
return Pose3(aRb, aTb);
|
||||
}
|
||||
|
||||
boost::optional<Pose3> Pose3::Align(const Matrix& a, const Matrix& b) {
  if (a.rows() != 3 || b.rows() != 3 || a.cols() != b.cols()) {
    throw std::invalid_argument(
      "Pose3:Align expects 3*N matrices of equal shape.");
  }
  Point3Pairs abPointPairs;
  for (size_t j=0; j < a.cols(); j++) {
    abPointPairs.emplace_back(a.col(j), b.col(j));
  }
  return Pose3::Align(abPointPairs);
}
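And a matching hedged sketch for the 3-D case (values invented). At least three point pairs are required, since the pair-based overload returns boost::none otherwise, and they should not be collinear if the rotation is to be unique.

#include <gtsam/geometry/Pose3.h>

void alignPose3Example() {
  gtsam::Pose3 aTb(gtsam::Rot3::Rz(0.3), gtsam::Point3(1.0, 2.0, 3.0));
  gtsam::Matrix b(3, 4);                         // columns are points in frame b
  b << 0.0, 1.0, 0.0, 0.0,
       0.0, 0.0, 1.0, 0.0,
       0.0, 0.0, 0.0, 1.0;
  const gtsam::Matrix a = aTb.transformFrom(b);  // a_j = aTb * b_j
  boost::optional<gtsam::Pose3> estimated = gtsam::Pose3::Align(a, b);
}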
|
||||
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42
|
||||
boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
|
||||
Point3Pairs abPointPairs;
|
||||
for (const Point3Pair &baPair : baPointPairs) {
|
||||
|
@ -458,6 +487,7 @@ boost::optional<Pose3> align(const Point3Pairs &baPointPairs) {
|
|||
}
|
||||
return Pose3::Align(abPointPairs);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::ostream &operator<<(std::ostream &os, const Pose3& pose) {
|
||||
|
|
|
@ -85,6 +85,9 @@ public:
|
|||
*/
|
||||
static boost::optional<Pose3> Align(const std::vector<Point3Pair>& abPointPairs);
|
||||
|
||||
// Version of Pose3::Align that takes 2 matrices.
|
||||
static boost::optional<Pose3> Align(const Matrix& a, const Matrix& b);
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
@ -249,6 +252,13 @@ public:
|
|||
Point3 transformFrom(const Point3& point, OptionalJacobian<3, 6> Hself =
|
||||
boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;
|
||||
|
||||
/**
|
||||
* @brief transform many points in Pose coordinates and transform to world.
|
||||
* @param points 3*N matrix in Pose coordinates
|
||||
* @return points in world coordinates, as 3*N Matrix
|
||||
*/
|
||||
Matrix transformFrom(const Matrix& points) const;
|
||||
|
||||
/** syntactic sugar for transformFrom */
|
||||
inline Point3 operator*(const Point3& point) const {
|
||||
return transformFrom(point);
|
||||
|
@ -264,6 +274,13 @@ public:
|
|||
Point3 transformTo(const Point3& point, OptionalJacobian<3, 6> Hself =
|
||||
boost::none, OptionalJacobian<3, 3> Hpoint = boost::none) const;
|
||||
|
||||
/**
|
||||
* @brief transform many points in world coordinates and transform to Pose.
|
||||
* @param points 3*N matrix in world coordinates
|
||||
* @return points in Pose coordinates, as 3*N Matrix
|
||||
*/
|
||||
Matrix transformTo(const Matrix& points) const;
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
template <>
|
||||
GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref<Matrix> X) {
|
||||
void SOn::Hat(const Vector &xi, Eigen::Ref<Matrix> X) {
|
||||
size_t n = AmbientDim(xi.size());
|
||||
if (n < 2)
|
||||
throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
|
||||
|
@ -48,7 +48,7 @@ GTSAM_EXPORT void SOn::Hat(const Vector &xi, Eigen::Ref<Matrix> X) {
|
|||
}
|
||||
}
|
||||
|
||||
template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) {
|
||||
template <> Matrix SOn::Hat(const Vector &xi) {
|
||||
size_t n = AmbientDim(xi.size());
|
||||
Matrix X(n, n); // allocate space for n*n skew-symmetric matrix
|
||||
SOn::Hat(xi, X);
|
||||
|
@ -56,7 +56,6 @@ template <> GTSAM_EXPORT Matrix SOn::Hat(const Vector &xi) {
|
|||
}
|
||||
|
||||
template <>
|
||||
GTSAM_EXPORT
|
||||
Vector SOn::Vee(const Matrix& X) {
|
||||
const size_t n = X.rows();
|
||||
if (n < 2) throw std::invalid_argument("SO<N>::Hat: n<2 not supported");
|
||||
|
@ -104,7 +103,9 @@ SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
|
|||
}
|
||||
|
||||
// Dynamic version of vec
|
||||
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const {
|
||||
template <>
|
||||
typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const
|
||||
{
|
||||
const size_t n = rows(), n2 = n * n;
|
||||
|
||||
// Vectorize
|
||||
|
|
|
@ -24,6 +24,8 @@
|
|||
#include <gtsam/dllexport.h>
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
||||
#include <iostream> // TODO(frank): how to avoid?
|
||||
#include <string>
|
||||
#include <type_traits>
|
||||
|
@ -356,17 +358,21 @@ Vector SOn::Vee(const Matrix& X);
|
|||
using DynamicJacobian = OptionalJacobian<Eigen::Dynamic, Eigen::Dynamic>;
|
||||
|
||||
template <>
|
||||
GTSAM_EXPORT
|
||||
SOn LieGroup<SOn, Eigen::Dynamic>::compose(const SOn& g, DynamicJacobian H1,
|
||||
DynamicJacobian H2) const;
|
||||
|
||||
template <>
|
||||
GTSAM_EXPORT
|
||||
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
|
||||
DynamicJacobian H2) const;
|
||||
|
||||
/*
|
||||
* Specialize dynamic vec.
|
||||
*/
|
||||
template <> typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const;
|
||||
template <>
|
||||
GTSAM_EXPORT
|
||||
typename SOn::VectorN2 SOn::vec(DynamicJacobian H) const;
|
||||
|
||||
/** Serialization function */
|
||||
template<class Archive>
|
||||
|
|
|
@ -23,11 +23,12 @@
|
|||
#include <gtsam/geometry/Point2.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/base/VectorSerialization.h>
|
||||
#include <gtsam/base/Matrix.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
||||
#include <random>
|
||||
#include <string>
|
||||
|
@ -39,7 +40,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
/// Represents a 3D point on a unit sphere.
|
||||
class Unit3 {
|
||||
class GTSAM_EXPORT Unit3 {
|
||||
|
||||
private:
|
||||
|
||||
|
@ -96,7 +97,7 @@ public:
|
|||
}
|
||||
|
||||
/// Named constructor from Point3 with optional Jacobian
|
||||
GTSAM_EXPORT static Unit3 FromPoint3(const Point3& point, //
|
||||
static Unit3 FromPoint3(const Point3& point, //
|
||||
OptionalJacobian<2, 3> H = boost::none);
|
||||
|
||||
/**
|
||||
|
@ -105,7 +106,7 @@ public:
|
|||
* std::mt19937 engine(42);
|
||||
* Unit3 unit = Unit3::Random(engine);
|
||||
*/
|
||||
GTSAM_EXPORT static Unit3 Random(std::mt19937 & rng);
|
||||
static Unit3 Random(std::mt19937 & rng);
|
||||
|
||||
/// @}
|
||||
|
||||
|
@ -115,7 +116,7 @@ public:
|
|||
friend std::ostream& operator<<(std::ostream& os, const Unit3& pair);
|
||||
|
||||
/// The print function
|
||||
GTSAM_EXPORT void print(const std::string& s = std::string()) const;
|
||||
void print(const std::string& s = std::string()) const;
|
||||
|
||||
/// The equals function with tolerance
|
||||
bool equals(const Unit3& s, double tol = 1e-9) const {
|
||||
|
@ -132,16 +133,16 @@ public:
|
|||
* tangent to the sphere at the current direction.
|
||||
* Provides derivatives of the basis with the two basis vectors stacked up as a 6x1.
|
||||
*/
|
||||
GTSAM_EXPORT const Matrix32& basis(OptionalJacobian<6, 2> H = boost::none) const;
|
||||
const Matrix32& basis(OptionalJacobian<6, 2> H = boost::none) const;
|
||||
|
||||
/// Return skew-symmetric associated with 3D point on unit sphere
|
||||
GTSAM_EXPORT Matrix3 skew() const;
|
||||
Matrix3 skew() const;
|
||||
|
||||
/// Return unit-norm Point3
|
||||
GTSAM_EXPORT Point3 point3(OptionalJacobian<3, 2> H = boost::none) const;
|
||||
Point3 point3(OptionalJacobian<3, 2> H = boost::none) const;
|
||||
|
||||
/// Return unit-norm Vector
|
||||
GTSAM_EXPORT Vector3 unitVector(OptionalJacobian<3, 2> H = boost::none) const;
|
||||
Vector3 unitVector(OptionalJacobian<3, 2> H = boost::none) const;
|
||||
|
||||
/// Return scaled direction as Point3
|
||||
friend Point3 operator*(double s, const Unit3& d) {
|
||||
|
@ -149,20 +150,20 @@ public:
|
|||
}
|
||||
|
||||
/// Return dot product with q
|
||||
GTSAM_EXPORT double dot(const Unit3& q, OptionalJacobian<1,2> H1 = boost::none, //
|
||||
double dot(const Unit3& q, OptionalJacobian<1,2> H1 = boost::none, //
|
||||
OptionalJacobian<1,2> H2 = boost::none) const;
|
||||
|
||||
/// Signed, vector-valued error between two directions
|
||||
/// @deprecated, errorVector has the proper derivatives, this confusingly has only the second.
|
||||
GTSAM_EXPORT Vector2 error(const Unit3& q, OptionalJacobian<2, 2> H_q = boost::none) const;
|
||||
Vector2 error(const Unit3& q, OptionalJacobian<2, 2> H_q = boost::none) const;
|
||||
|
||||
/// Signed, vector-valued error between two directions
|
||||
/// NOTE(hayk): This method has zero derivatives if this (p) and q are orthogonal.
|
||||
GTSAM_EXPORT Vector2 errorVector(const Unit3& q, OptionalJacobian<2, 2> H_p = boost::none, //
|
||||
Vector2 errorVector(const Unit3& q, OptionalJacobian<2, 2> H_p = boost::none, //
|
||||
OptionalJacobian<2, 2> H_q = boost::none) const;
|
||||
|
||||
/// Distance between two directions
|
||||
GTSAM_EXPORT double distance(const Unit3& q, OptionalJacobian<1, 2> H = boost::none) const;
|
||||
double distance(const Unit3& q, OptionalJacobian<1, 2> H = boost::none) const;
|
||||
|
||||
/// Cross-product between two Unit3s
|
||||
Unit3 cross(const Unit3& q) const {
|
||||
|
@ -195,10 +196,10 @@ public:
|
|||
};
|
||||
|
||||
/// The retract function
|
||||
GTSAM_EXPORT Unit3 retract(const Vector2& v, OptionalJacobian<2,2> H = boost::none) const;
|
||||
Unit3 retract(const Vector2& v, OptionalJacobian<2,2> H = boost::none) const;
|
||||
|
||||
/// The local coordinates function
|
||||
GTSAM_EXPORT Vector2 localCoordinates(const Unit3& s) const;
|
||||
Vector2 localCoordinates(const Unit3& s) const;
|
||||
|
||||
/// @}
|
||||
|
||||
|
|
|
@ -372,6 +372,9 @@ class Pose2 {
|
|||
Pose2(const gtsam::Rot2& r, const gtsam::Point2& t);
|
||||
Pose2(Vector v);
|
||||
|
||||
static boost::optional<gtsam::Pose2> Align(const gtsam::Point2Pairs& abPointPairs);
|
||||
static boost::optional<gtsam::Pose2> Align(const gtsam::Matrix& a, const gtsam::Matrix& b);
|
||||
|
||||
// Testable
|
||||
void print(string s = "") const;
|
||||
bool equals(const gtsam::Pose2& pose, double tol) const;
|
||||
|
@ -406,6 +409,10 @@ class Pose2 {
|
|||
gtsam::Point2 transformFrom(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 transformTo(const gtsam::Point2& p) const;
|
||||
|
||||
// Matrix versions
|
||||
Matrix transformFrom(const Matrix& points) const;
|
||||
Matrix transformTo(const Matrix& points) const;
|
||||
|
||||
// Standard Interface
|
||||
double x() const;
|
||||
double y() const;
|
||||
|
@ -420,8 +427,6 @@ class Pose2 {
|
|||
void serialize() const;
|
||||
};
|
||||
|
||||
boost::optional<gtsam::Pose2> align(const gtsam::Point2Pairs& pairs);
|
||||
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
class Pose3 {
|
||||
// Standard Constructors
|
||||
|
@ -431,6 +436,9 @@ class Pose3 {
|
|||
Pose3(const gtsam::Pose2& pose2);
|
||||
Pose3(Matrix mat);
|
||||
|
||||
static boost::optional<gtsam::Pose3> Align(const gtsam::Point3Pairs& abPointPairs);
|
||||
static boost::optional<gtsam::Pose3> Align(const gtsam::Matrix& a, const gtsam::Matrix& b);
|
||||
|
||||
// Testable
|
||||
void print(string s = "") const;
|
||||
bool equals(const gtsam::Pose3& pose, double tol) const;
|
||||
|
@ -470,6 +478,10 @@ class Pose3 {
|
|||
gtsam::Point3 transformFrom(const gtsam::Point3& point) const;
|
||||
gtsam::Point3 transformTo(const gtsam::Point3& point) const;
|
||||
|
||||
// Matrix versions
|
||||
Matrix transformFrom(const Matrix& points) const;
|
||||
Matrix transformTo(const Matrix& points) const;
|
||||
|
||||
// Standard Interface
|
||||
gtsam::Rot3 rotation() const;
|
||||
gtsam::Point3 translation() const;
|
||||
|
@ -572,7 +584,13 @@ class Cal3_S2 {

  // Action on Point2
  gtsam::Point2 calibrate(const gtsam::Point2& p) const;
  gtsam::Point2 calibrate(const gtsam::Point2& p,
                          Eigen::Ref<Eigen::MatrixXd> Dcal,
                          Eigen::Ref<Eigen::MatrixXd> Dp) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
  gtsam::Point2 uncalibrate(const gtsam::Point2& p,
                            Eigen::Ref<Eigen::MatrixXd> Dcal,
                            Eigen::Ref<Eigen::MatrixXd> Dp) const;
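A hedged C++ counterpart of the calibrate overloads exposed above; the calibration values are invented, and the fixed Jacobian sizes assume Cal3_S2's five parameters (fx, fy, s, u0, v0).

#include <gtsam/geometry/Cal3_S2.h>

void calibrateWithJacobians() {
  gtsam::Cal3_S2 K(500.0, 500.0, 0.0, 320.0, 240.0);  // fx, fy, s, u0, v0
  Eigen::Matrix<double, 2, 5> Dcal;  // d(normalized point)/d(calibration)
  Eigen::Matrix<double, 2, 2> Dp;    // d(normalized point)/d(pixel)
  gtsam::Point2 pn = K.calibrate(gtsam::Point2(330.0, 250.0), Dcal, Dp);
}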
|
||||
|
||||
// Standard Interface
|
||||
double fx() const;
|
||||
|
@ -611,7 +629,13 @@ virtual class Cal3DS2_Base {
|
|||
|
||||
// Action on Point2
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
@ -668,7 +692,13 @@ virtual class Cal3Unified : gtsam::Cal3DS2_Base {
|
|||
// Note: the signature of this functions differ from the functions
|
||||
// with equal name in the base class.
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
@ -694,7 +724,13 @@ class Cal3Fisheye {
|
|||
|
||||
// Action on Point2
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
|
||||
// Standard Interface
|
||||
double fx() const;
|
||||
|
@ -757,7 +793,13 @@ class Cal3Bundler {
|
|||
|
||||
// Action on Point2
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 calibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p) const;
|
||||
gtsam::Point2 uncalibrate(const gtsam::Point2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dp) const;
|
||||
|
||||
// Standard Interface
|
||||
double fx() const;
|
||||
|
@ -795,12 +837,25 @@ class CalibratedCamera {
|
|||
|
||||
// Action on Point3
|
||||
gtsam::Point2 project(const gtsam::Point3& point) const;
|
||||
gtsam::Point2 project(const gtsam::Point3& point,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcamera,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpoint);
|
||||
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
|
||||
gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth);
|
||||
|
||||
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
|
||||
|
||||
// Standard Interface
|
||||
gtsam::Pose3 pose() const;
|
||||
double range(const gtsam::Point3& point) const;
|
||||
double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpoint);
|
||||
double range(const gtsam::Pose3& pose) const;
|
||||
double range(const gtsam::Pose3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpose);
|
||||
double range(const gtsam::CalibratedCamera& camera) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
|
@ -812,6 +867,7 @@ template <CALIBRATION>
|
|||
class PinholeCamera {
|
||||
// Standard Constructors and Named Constructors
|
||||
PinholeCamera();
|
||||
PinholeCamera(const gtsam::PinholeCamera<CALIBRATION> other);
|
||||
PinholeCamera(const gtsam::Pose3& pose);
|
||||
PinholeCamera(const gtsam::Pose3& pose, const CALIBRATION& K);
|
||||
static This Level(const CALIBRATION& K, const gtsam::Pose2& pose,
|
||||
|
@ -838,9 +894,22 @@ class PinholeCamera {
|
|||
static gtsam::Point2 Project(const gtsam::Point3& cameraPoint);
|
||||
pair<gtsam::Point2, bool> projectSafe(const gtsam::Point3& pw) const;
|
||||
gtsam::Point2 project(const gtsam::Point3& point);
|
||||
gtsam::Point2 project(const gtsam::Point3& point,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpose,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpoint,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dcal);
|
||||
gtsam::Point3 backproject(const gtsam::Point2& p, double depth) const;
|
||||
gtsam::Point3 backproject(const gtsam::Point2& p, double depth,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_dpose,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_dp,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_ddepth,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dresult_dcal);
|
||||
double range(const gtsam::Point3& point);
|
||||
double range(const gtsam::Point3& point, Eigen::Ref<Eigen::MatrixXd> Dcamera,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpoint);
|
||||
double range(const gtsam::Pose3& pose);
|
||||
double range(const gtsam::Pose3& pose, Eigen::Ref<Eigen::MatrixXd> Dcamera,
|
||||
Eigen::Ref<Eigen::MatrixXd> Dpose);
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
|
@ -909,9 +978,18 @@ class StereoCamera {
|
|||
static size_t Dim();
|
||||
|
||||
// Transformations and measurement functions
|
||||
gtsam::StereoPoint2 project(const gtsam::Point3& point);
|
||||
gtsam::StereoPoint2 project(const gtsam::Point3& point) const;
|
||||
gtsam::Point3 backproject(const gtsam::StereoPoint2& p) const;
|
||||
|
||||
// project with Jacobian
|
||||
gtsam::StereoPoint2 project2(const gtsam::Point3& point,
|
||||
Eigen::Ref<Eigen::MatrixXd> H1,
|
||||
Eigen::Ref<Eigen::MatrixXd> H2) const;
|
||||
|
||||
gtsam::Point3 backproject2(const gtsam::StereoPoint2& p,
|
||||
Eigen::Ref<Eigen::MatrixXd> H1,
|
||||
Eigen::Ref<Eigen::MatrixXd> H2) const;
|
||||
|
||||
// enabling serialization functionality
|
||||
void serialize() const;
|
||||
};
|
||||
|
|
|
@ -160,7 +160,7 @@ TEST(Cal3Bundler, retract) {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Cal3_S2, Print) {
|
||||
TEST(Cal3Bundler, Print) {
|
||||
Cal3Bundler cal(1, 2, 3, 4, 5);
|
||||
std::stringstream os;
|
||||
os << "f: " << cal.fx() << ", k1: " << cal.k1() << ", k2: " << cal.k2()
|
||||
|
|
|
@ -717,123 +717,112 @@ TEST( Pose2, range_pose )
|
|||
/* ************************************************************************* */
|
||||
|
||||
TEST(Pose2, align_1) {
|
||||
Pose2 expected(Rot2::fromAngle(0), Point2(10,10));
|
||||
|
||||
vector<Point2Pair> correspondences;
|
||||
Point2Pair pq1(make_pair(Point2(0,0), Point2(10,10)));
|
||||
Point2Pair pq2(make_pair(Point2(20,10), Point2(30,20)));
|
||||
correspondences += pq1, pq2;
|
||||
|
||||
boost::optional<Pose2> actual = align(correspondences);
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
Pose2 expected(Rot2::fromAngle(0), Point2(10, 10));
|
||||
Point2Pairs ab_pairs {{Point2(10, 10), Point2(0, 0)},
|
||||
{Point2(30, 20), Point2(20, 10)}};
|
||||
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
|
||||
EXPECT(assert_equal(expected, *aTb));
|
||||
}
|
||||
|
||||
TEST(Pose2, align_2) {
|
||||
Point2 t(20,10);
|
||||
Point2 t(20, 10);
|
||||
Rot2 R = Rot2::fromAngle(M_PI/2.0);
|
||||
Pose2 expected(R, t);
|
||||
|
||||
vector<Point2Pair> correspondences;
|
||||
Point2 p1(0,0), p2(10,0);
|
||||
Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2);
|
||||
EXPECT(assert_equal(Point2(20,10),q1));
|
||||
EXPECT(assert_equal(Point2(20,20),q2));
|
||||
Point2Pair pq1(make_pair(p1, q1));
|
||||
Point2Pair pq2(make_pair(p2, q2));
|
||||
correspondences += pq1, pq2;
|
||||
Point2 b1(0, 0), b2(10, 0);
|
||||
Point2Pairs ab_pairs {{expected.transformFrom(b1), b1},
|
||||
{expected.transformFrom(b2), b2}};
|
||||
|
||||
boost::optional<Pose2> actual = align(correspondences);
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
|
||||
EXPECT(assert_equal(expected, *aTb));
|
||||
}
|
||||
|
||||
namespace align_3 {
|
||||
Point2 t(10,10);
|
||||
Point2 t(10, 10);
|
||||
Pose2 expected(Rot2::fromAngle(2*M_PI/3), t);
|
||||
Point2 p1(0,0), p2(10,0), p3(10,10);
|
||||
Point2 q1 = expected.transformFrom(p1), q2 = expected.transformFrom(p2), q3 = expected.transformFrom(p3);
|
||||
Point2 b1(0, 0), b2(10, 0), b3(10, 10);
|
||||
Point2 a1 = expected.transformFrom(b1),
|
||||
a2 = expected.transformFrom(b2),
|
||||
a3 = expected.transformFrom(b3);
|
||||
}
|
||||
|
||||
TEST(Pose2, align_3) {
|
||||
using namespace align_3;
|
||||
|
||||
vector<Point2Pair> correspondences;
|
||||
Point2Pair pq1(make_pair(p1, q1));
|
||||
Point2Pair pq2(make_pair(p2, q2));
|
||||
Point2Pair pq3(make_pair(p3, q3));
|
||||
correspondences += pq1, pq2, pq3;
|
||||
Point2Pairs ab_pairs;
|
||||
Point2Pair ab1(make_pair(a1, b1));
|
||||
Point2Pair ab2(make_pair(a2, b2));
|
||||
Point2Pair ab3(make_pair(a3, b3));
|
||||
ab_pairs += ab1, ab2, ab3;
|
||||
|
||||
boost::optional<Pose2> actual = align(correspondences);
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
boost::optional<Pose2> aTb = Pose2::Align(ab_pairs);
|
||||
EXPECT(assert_equal(expected, *aTb));
|
||||
}
|
||||
|
||||
namespace {
|
||||
/* ************************************************************************* */
|
||||
// Prototype code to align two triangles using a rigid transform
|
||||
/* ************************************************************************* */
|
||||
struct Triangle { size_t i_,j_,k_;};
|
||||
struct Triangle { size_t i_, j_, k_;};
|
||||
|
||||
boost::optional<Pose2> align2(const Point2Vector& ps, const Point2Vector& qs,
|
||||
boost::optional<Pose2> align2(const Point2Vector& as, const Point2Vector& bs,
|
||||
const pair<Triangle, Triangle>& trianglePair) {
|
||||
const Triangle& t1 = trianglePair.first, t2 = trianglePair.second;
|
||||
vector<Point2Pair> correspondences;
|
||||
correspondences += make_pair(ps[t1.i_],qs[t2.i_]), make_pair(ps[t1.j_],qs[t2.j_]), make_pair(ps[t1.k_],qs[t2.k_]);
|
||||
return align(correspondences);
|
||||
Point2Pairs ab_pairs = {{as[t1.i_], bs[t2.i_]},
|
||||
{as[t1.j_], bs[t2.j_]},
|
||||
{as[t1.k_], bs[t2.k_]}};
|
||||
return Pose2::Align(ab_pairs);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Pose2, align_4) {
|
||||
using namespace align_3;
|
||||
|
||||
Point2Vector ps,qs;
|
||||
ps += p1, p2, p3;
|
||||
qs += q3, q1, q2; // note in 3,1,2 order !
|
||||
Point2Vector as, bs;
|
||||
as += a1, a2, a3;
|
||||
bs += b3, b1, b2; // note in 3,1,2 order !
|
||||
|
||||
Triangle t1; t1.i_=0; t1.j_=1; t1.k_=2;
|
||||
Triangle t2; t2.i_=1; t2.j_=2; t2.k_=0;
|
||||
|
||||
boost::optional<Pose2> actual = align2(ps, qs, make_pair(t1,t2));
|
||||
boost::optional<Pose2> actual = align2(as, bs, {t1, t2});
|
||||
EXPECT(assert_equal(expected, *actual));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
namespace {
|
||||
Pose2 id;
|
||||
Pose2 T1(M_PI / 4.0, Point2(sqrt(0.5), sqrt(0.5)));
|
||||
Pose2 T2(M_PI / 2.0, Point2(0.0, 2.0));
|
||||
} // namespace
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Pose2 , Invariants) {
|
||||
Pose2 id;
|
||||
|
||||
EXPECT(check_group_invariants(id,id));
|
||||
EXPECT(check_group_invariants(id,T1));
|
||||
EXPECT(check_group_invariants(T2,id));
|
||||
EXPECT(check_group_invariants(T2,T1));
|
||||
|
||||
EXPECT(check_manifold_invariants(id,id));
|
||||
EXPECT(check_manifold_invariants(id,T1));
|
||||
EXPECT(check_manifold_invariants(T2,id));
|
||||
EXPECT(check_manifold_invariants(T2,T1));
|
||||
TEST(Pose2, Invariants) {
|
||||
EXPECT(check_group_invariants(id, id));
|
||||
EXPECT(check_group_invariants(id, T1));
|
||||
EXPECT(check_group_invariants(T2, id));
|
||||
EXPECT(check_group_invariants(T2, T1));
|
||||
|
||||
EXPECT(check_manifold_invariants(id, id));
|
||||
EXPECT(check_manifold_invariants(id, T1));
|
||||
EXPECT(check_manifold_invariants(T2, id));
|
||||
EXPECT(check_manifold_invariants(T2, T1));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Pose2 , LieGroupDerivatives) {
|
||||
Pose2 id;
|
||||
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id,id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id,T2);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2,id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2,T1);
|
||||
|
||||
TEST(Pose2, LieGroupDerivatives) {
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, T2);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2, id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2, T1);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Pose2 , ChartDerivatives) {
|
||||
Pose2 id;
|
||||
|
||||
CHECK_CHART_DERIVATIVES(id,id);
|
||||
CHECK_CHART_DERIVATIVES(id,T2);
|
||||
CHECK_CHART_DERIVATIVES(T2,id);
|
||||
CHECK_CHART_DERIVATIVES(T2,T1);
|
||||
TEST(Pose2, ChartDerivatives) {
|
||||
CHECK_CHART_DERIVATIVES(id, id);
|
||||
CHECK_CHART_DERIVATIVES(id, T2);
|
||||
CHECK_CHART_DERIVATIVES(T2, id);
|
||||
CHECK_CHART_DERIVATIVES(T2, T1);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
|
|
@ -80,12 +80,6 @@ TEST(Quaternion , Compose) {
|
|||
EXPECT(traits<Q>::Equals(expected, actual));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
Vector3 Q_z_axis(0, 0, 1);
|
||||
Q id(Eigen::AngleAxisd(0, Q_z_axis));
|
||||
Q R1(Eigen::AngleAxisd(1, Q_z_axis));
|
||||
Q R2(Eigen::AngleAxisd(2, Vector3(0, 1, 0)));
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Quaternion , Between) {
|
||||
Vector3 z_axis(0, 0, 1);
|
||||
|
@ -108,7 +102,15 @@ TEST(Quaternion , Inverse) {
|
|||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Quaternion , Invariants) {
|
||||
namespace {
|
||||
Vector3 Q_z_axis(0, 0, 1);
|
||||
Q id(Eigen::AngleAxisd(0, Q_z_axis));
|
||||
Q R1(Eigen::AngleAxisd(1, Q_z_axis));
|
||||
Q R2(Eigen::AngleAxisd(2, Vector3(0, 1, 0)));
|
||||
} // namespace
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Quaternion, Invariants) {
|
||||
EXPECT(check_group_invariants(id, id));
|
||||
EXPECT(check_group_invariants(id, R1));
|
||||
EXPECT(check_group_invariants(R2, id));
|
||||
|
@ -121,7 +123,7 @@ TEST(Quaternion , Invariants) {
|
|||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Quaternion , LieGroupDerivatives) {
|
||||
TEST(Quaternion, LieGroupDerivatives) {
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, R2);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(R2, id);
|
||||
|
@ -129,7 +131,7 @@ TEST(Quaternion , LieGroupDerivatives) {
|
|||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Quaternion , ChartDerivatives) {
|
||||
TEST(Quaternion, ChartDerivatives) {
|
||||
CHECK_CHART_DERIVATIVES(id, id);
|
||||
CHECK_CHART_DERIVATIVES(id, R2);
|
||||
CHECK_CHART_DERIVATIVES(R2, id);
|
||||
|
|
|
@ -156,44 +156,39 @@ TEST( Rot2, relativeBearing )
|
|||
}
|
||||
|
||||
//******************************************************************************
|
||||
namespace {
|
||||
Rot2 id;
|
||||
Rot2 T1(0.1);
|
||||
Rot2 T2(0.2);
|
||||
} // namespace
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Rot2 , Invariants) {
|
||||
Rot2 id;
|
||||
|
||||
EXPECT(check_group_invariants(id,id));
|
||||
EXPECT(check_group_invariants(id,T1));
|
||||
EXPECT(check_group_invariants(T2,id));
|
||||
EXPECT(check_group_invariants(T2,T1));
|
||||
|
||||
EXPECT(check_manifold_invariants(id,id));
|
||||
EXPECT(check_manifold_invariants(id,T1));
|
||||
EXPECT(check_manifold_invariants(T2,id));
|
||||
EXPECT(check_manifold_invariants(T2,T1));
|
||||
TEST(Rot2, Invariants) {
|
||||
EXPECT(check_group_invariants(id, id));
|
||||
EXPECT(check_group_invariants(id, T1));
|
||||
EXPECT(check_group_invariants(T2, id));
|
||||
EXPECT(check_group_invariants(T2, T1));
|
||||
|
||||
EXPECT(check_manifold_invariants(id, id));
|
||||
EXPECT(check_manifold_invariants(id, T1));
|
||||
EXPECT(check_manifold_invariants(T2, id));
|
||||
EXPECT(check_manifold_invariants(T2, T1));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Rot2 , LieGroupDerivatives) {
|
||||
Rot2 id;
|
||||
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id,id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id,T2);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2,id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2,T1);
|
||||
|
||||
TEST(Rot2, LieGroupDerivatives) {
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(id, T2);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2, id);
|
||||
CHECK_LIE_GROUP_DERIVATIVES(T2, T1);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(Rot2 , ChartDerivatives) {
|
||||
Rot2 id;
|
||||
|
||||
CHECK_CHART_DERIVATIVES(id,id);
|
||||
CHECK_CHART_DERIVATIVES(id,T2);
|
||||
CHECK_CHART_DERIVATIVES(T2,id);
|
||||
CHECK_CHART_DERIVATIVES(T2,T1);
|
||||
TEST(Rot2, ChartDerivatives) {
|
||||
CHECK_CHART_DERIVATIVES(id, id);
|
||||
CHECK_CHART_DERIVATIVES(id, T2);
|
||||
CHECK_CHART_DERIVATIVES(T2, id);
|
||||
CHECK_CHART_DERIVATIVES(T2, T1);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
@ -640,46 +640,44 @@ TEST( Rot3, slerp)
}

//******************************************************************************
namespace {
Rot3 id;
Rot3 T1(Rot3::AxisAngle(Vector3(0, 0, 1), 1));
Rot3 T2(Rot3::AxisAngle(Vector3(0, 1, 0), 2));
} // namespace

//******************************************************************************
TEST(Rot3 , Invariants) {
Rot3 id;
TEST(Rot3, Invariants) {
EXPECT(check_group_invariants(id, id));
EXPECT(check_group_invariants(id, T1));
EXPECT(check_group_invariants(T2, id));
EXPECT(check_group_invariants(T2, T1));
EXPECT(check_group_invariants(T1, T2));
EXPECT(check_group_invariants(id,id));
EXPECT(check_group_invariants(id,T1));
EXPECT(check_group_invariants(T2,id));
EXPECT(check_group_invariants(T2,T1));
EXPECT(check_group_invariants(T1,T2));
EXPECT(check_manifold_invariants(id,id));
EXPECT(check_manifold_invariants(id,T1));
EXPECT(check_manifold_invariants(T2,id));
EXPECT(check_manifold_invariants(T2,T1));
EXPECT(check_manifold_invariants(T1,T2));
EXPECT(check_manifold_invariants(id, id));
EXPECT(check_manifold_invariants(id, T1));
EXPECT(check_manifold_invariants(T2, id));
EXPECT(check_manifold_invariants(T2, T1));
EXPECT(check_manifold_invariants(T1, T2));
}

//******************************************************************************
TEST(Rot3 , LieGroupDerivatives) {
Rot3 id;
CHECK_LIE_GROUP_DERIVATIVES(id,id);
CHECK_LIE_GROUP_DERIVATIVES(id,T2);
CHECK_LIE_GROUP_DERIVATIVES(T2,id);
CHECK_LIE_GROUP_DERIVATIVES(T1,T2);
CHECK_LIE_GROUP_DERIVATIVES(T2,T1);
TEST(Rot3, LieGroupDerivatives) {
CHECK_LIE_GROUP_DERIVATIVES(id, id);
CHECK_LIE_GROUP_DERIVATIVES(id, T2);
CHECK_LIE_GROUP_DERIVATIVES(T2, id);
CHECK_LIE_GROUP_DERIVATIVES(T1, T2);
CHECK_LIE_GROUP_DERIVATIVES(T2, T1);
}

//******************************************************************************
TEST(Rot3 , ChartDerivatives) {
Rot3 id;
TEST(Rot3, ChartDerivatives) {
if (ROT3_DEFAULT_COORDINATES_MODE == Rot3::EXPMAP) {
CHECK_CHART_DERIVATIVES(id,id);
CHECK_CHART_DERIVATIVES(id,T2);
CHECK_CHART_DERIVATIVES(T2,id);
CHECK_CHART_DERIVATIVES(T1,T2);
CHECK_CHART_DERIVATIVES(T2,T1);
CHECK_CHART_DERIVATIVES(id, id);
CHECK_CHART_DERIVATIVES(id, T2);
CHECK_CHART_DERIVATIVES(T2, id);
CHECK_CHART_DERIVATIVES(T1, T2);
CHECK_CHART_DERIVATIVES(T2, T1);
}
}
@ -67,10 +67,12 @@ TEST(SO3, ClosestTo) {
}

//******************************************************************************
namespace {
SO3 id;
Vector3 z_axis(0, 0, 1), v2(1, 2, 0), v3(1, 2, 3);
SO3 R1(Eigen::AngleAxisd(0.1, z_axis));
SO3 R2(Eigen::AngleAxisd(0.2, z_axis));
} // namespace

/* ************************************************************************* */
TEST(SO3, ChordalMean) {

@ -79,16 +81,16 @@ TEST(SO3, ChordalMean) {
}

//******************************************************************************
// Check that Hat specialization is equal to dynamic version
TEST(SO3, Hat) {
// Check that Hat specialization is equal to dynamic version
EXPECT(assert_equal(SO3::Hat(z_axis), SOn::Hat(z_axis)));
EXPECT(assert_equal(SO3::Hat(v2), SOn::Hat(v2)));
EXPECT(assert_equal(SO3::Hat(v3), SOn::Hat(v3)));
}

//******************************************************************************
// Check that Hat specialization is equal to dynamic version
TEST(SO3, Vee) {
// Check that Hat specialization is equal to dynamic version
auto X1 = SOn::Hat(z_axis), X2 = SOn::Hat(v2), X3 = SOn::Hat(v3);
EXPECT(assert_equal(SO3::Vee(X1), SOn::Vee(X1)));
EXPECT(assert_equal(SO3::Vee(X2), SOn::Vee(X2)));
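The Hat/Vee checks above pin the fixed-size SO3 specialization against the dynamic SOn implementation. For reference, Hat on so(3) is the usual skew-symmetric (cross-product) matrix of the vector, and Vee is its inverse; a small self-contained sketch of that convention, assuming only the Eigen-based gtsam typedefs already used in these tests:

#include <gtsam/base/Matrix.h>

// Hat for so(3): w = (wx, wy, wz) maps to the skew-symmetric matrix [w]x,
// so that hatSketch(w) * p equals the cross product w x p for any 3-vector p.
gtsam::Matrix3 hatSketch(const gtsam::Vector3& w) {
  gtsam::Matrix3 W;
  W << 0, -w.z(), w.y(),
       w.z(), 0, -w.x(),
       -w.y(), w.x(), 0;
  return W;
}
// Vee is the inverse map: it reads (wx, wy, wz) back off the skew matrix.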
@ -48,6 +48,14 @@ TEST(SO4, Concept) {
}

//******************************************************************************
TEST(SO4, Random) {
std::mt19937 rng(42);
auto Q = SO4::Random(rng);
EXPECT_LONGS_EQUAL(4, Q.matrix().rows());
}

//******************************************************************************
namespace {
SO4 id;
Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished();
SO4 Q1 = SO4::Expmap(v1);

@ -55,13 +63,8 @@ Vector6 v2 = (Vector(6) << 0.00, 0.00, 0.00, 0.01, 0.02, 0.03).finished();
SO4 Q2 = SO4::Expmap(v2);
Vector6 v3 = (Vector(6) << 1, 2, 3, 4, 5, 6).finished();
SO4 Q3 = SO4::Expmap(v3);
} // namespace

//******************************************************************************
TEST(SO4, Random) {
std::mt19937 rng(42);
auto Q = SO4::Random(rng);
EXPECT_LONGS_EQUAL(4, Q.matrix().rows());
}
//******************************************************************************
TEST(SO4, Expmap) {
// If we do exponential map in SO(3) subgroup, topleft should be equal to R1.

@ -84,16 +87,16 @@ TEST(SO4, Expmap) {
}

//******************************************************************************
// Check that Hat specialization is equal to dynamic version
TEST(SO4, Hat) {
// Check that Hat specialization is equal to dynamic version
EXPECT(assert_equal(SO4::Hat(v1), SOn::Hat(v1)));
EXPECT(assert_equal(SO4::Hat(v2), SOn::Hat(v2)));
EXPECT(assert_equal(SO4::Hat(v3), SOn::Hat(v3)));
}

//******************************************************************************
// Check that Hat specialization is equal to dynamic version
TEST(SO4, Vee) {
// Check that Hat specialization is equal to dynamic version
auto X1 = SOn::Hat(v1), X2 = SOn::Hat(v2), X3 = SOn::Hat(v3);
EXPECT(assert_equal(SO4::Vee(X1), SOn::Vee(X1)));
EXPECT(assert_equal(SO4::Vee(X2), SOn::Vee(X2)));

@ -116,8 +119,8 @@ TEST(SO4, Retract) {
}

//******************************************************************************
// Check that Cayley is identical to dynamic version
TEST(SO4, Local) {
// Check that Cayley is identical to dynamic version
EXPECT(
assert_equal(id.localCoordinates(Q1), SOn(4).localCoordinates(SOn(Q1))));
EXPECT(

@ -166,9 +169,7 @@ TEST(SO4, vec) {
Matrix actualH;
const Vector16 actual = Q2.vec(actualH);
EXPECT(assert_equal(expected, actual));
std::function<Vector16(const SO4&)> f = [](const SO4& Q) {
return Q.vec();
};
std::function<Vector16(const SO4&)> f = [](const SO4& Q) { return Q.vec(); };
const Matrix numericalH = numericalDerivative11(f, Q2, 1e-5);
EXPECT(assert_equal(numericalH, actualH));
}
@ -19,6 +19,7 @@

#include <CppUnitLite/TestHarness.h>
#include <gtsam/geometry/Cal3Bundler.h>
#include <gtsam/geometry/Cal3DS2.h>
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/SphericalCamera.h>

@ -38,7 +39,7 @@ using namespace boost::assign;
// Some common constants

static const boost::shared_ptr<Cal3_S2> sharedCal = //
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
boost::make_shared<Cal3_S2>(1500, 1200, 0.1, 640, 480);

// Looking along X-axis, 1 meter above ground plane (x-y)
static const Rot3 upright = Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2);
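The sharedCal change above touches the third Cal3_S2 constructor argument, which is the skew s; the parameter order is (fx, fy, s, u0, v0), so the shared test calibration now carries a small non-zero skew of 0.1 instead of 0. For reference (not part of this diff):

// Cal3_S2 parameter order: fx, fy, s (skew), u0, v0 (principal point).
gtsam::Cal3_S2 K(1500 /*fx*/, 1200 /*fy*/, 0.1 /*s*/, 640 /*u0*/, 480 /*v0*/);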
@ -95,11 +96,123 @@ TEST(triangulation, twoPoses) {
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
}

//******************************************************************************
// Simple test with a well-behaved two camera situation with Cal3DS2 calibration.
TEST(triangulation, twoPosesCal3DS2) {
static const boost::shared_ptr<Cal3DS2> sharedDistortedCal = //
boost::make_shared<Cal3DS2>(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001,
-0.0003);

PinholeCamera<Cal3DS2> camera1Distorted(pose1, *sharedDistortedCal);

PinholeCamera<Cal3DS2> camera2Distorted(pose2, *sharedDistortedCal);

// 0. Project two landmarks into two cameras and triangulate
Point2 z1Distorted = camera1Distorted.project(landmark);
Point2 z2Distorted = camera2Distorted.project(landmark);

vector<Pose3> poses;
Point2Vector measurements;

poses += pose1, pose2;
measurements += z1Distorted, z2Distorted;

double rank_tol = 1e-9;

// 1. Test simple DLT, perfect in no noise situation
bool optimize = false;
boost::optional<Point3> actual1 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));

// 2. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));

// 3. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
measurements.at(0) += Point2(0.1, 0.5);
measurements.at(1) += Point2(-0.2, 0.3);
optimize = false;
boost::optional<Point3> actual3 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-3));

// 4. Now with optimization on
optimize = true;
boost::optional<Point3> actual4 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-3));
}
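The poses += pose1, pose2; and measurements += z1Distorted, z2Distorted; statements in these tests rely on the using namespace boost::assign; visible in an earlier hunk: boost::assign overloads operator+= and the comma operator so that several elements can be appended to a standard vector in one line. A plain-STL equivalent of the same setup, shown only for readers unfamiliar with that idiom:

// Without boost::assign, the same setup is just repeated push_back calls.
vector<Pose3> poses;
poses.push_back(pose1);
poses.push_back(pose2);
Point2Vector measurements;
measurements.push_back(z1Distorted);
measurements.push_back(z2Distorted);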
//******************************************************************************
// Simple test with a well-behaved two camera situation with Fisheye
// calibration.
TEST(triangulation, twoPosesFisheye) {
using Calibration = Cal3Fisheye;
static const boost::shared_ptr<Calibration> sharedDistortedCal = //
boost::make_shared<Calibration>(1500, 1200, .1, 640, 480, -.3, 0.1,
0.0001, -0.0003);

PinholeCamera<Calibration> camera1Distorted(pose1, *sharedDistortedCal);

PinholeCamera<Calibration> camera2Distorted(pose2, *sharedDistortedCal);

// 0. Project two landmarks into two cameras and triangulate
Point2 z1Distorted = camera1Distorted.project(landmark);
Point2 z2Distorted = camera2Distorted.project(landmark);

vector<Pose3> poses;
Point2Vector measurements;

poses += pose1, pose2;
measurements += z1Distorted, z2Distorted;

double rank_tol = 1e-9;

// 1. Test simple DLT, perfect in no noise situation
bool optimize = false;
boost::optional<Point3> actual1 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));

// 2. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));

// 3. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
measurements.at(0) += Point2(0.1, 0.5);
measurements.at(1) += Point2(-0.2, 0.3);
optimize = false;
boost::optional<Point3> actual3 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-3));

// 4. Now with optimization on
optimize = true;
boost::optional<Point3> actual4 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-3));
}

//******************************************************************************
// Similar, but now with Bundler calibration
TEST(triangulation, twoPosesBundler) {
boost::shared_ptr<Cal3Bundler> bundlerCal = //
boost::make_shared<Cal3Bundler>(1500, 0, 0, 640, 480);
boost::make_shared<Cal3Bundler>(1500, 0.1, 0.2, 640, 480);
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);

@ -117,7 +230,8 @@ TEST(triangulation, twoPosesBundler) {
double rank_tol = 1e-9;

boost::optional<Point3> actual = //
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol,
optimize);
EXPECT(assert_equal(landmark, *actual, 1e-7));

// Add some noise and try again

@ -125,8 +239,9 @@ TEST(triangulation, twoPosesBundler) {
measurements.at(1) += Point2(-0.2, 0.3);

boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-4));
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol,
optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-3));
}

//******************************************************************************

@ -336,6 +451,31 @@ TEST(triangulation, fourPoses_distinct_Ks) {
#endif
}

//******************************************************************************
TEST(triangulation, fourPoses_distinct_Ks_distortion) {
Cal3DS2 K1(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
PinholeCamera<Cal3DS2> camera1(pose1, K1);

// create second camera 1 meter to the right of first camera
Cal3DS2 K2(1600, 1300, 0, 650, 440, -.2, 0.05, 0.0002, -0.0001);
PinholeCamera<Cal3DS2> camera2(pose2, K2);

// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);

CameraSet<PinholeCamera<Cal3DS2>> cameras;
Point2Vector measurements;

cameras += camera1, camera2;
measurements += z1, z2;

boost::optional<Point3> actual = //
triangulatePoint3<Cal3DS2>(cameras, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
}

//******************************************************************************
TEST(triangulation, outliersAndFarLandmarks) {
Cal3_S2 K1(1500, 1200, 0, 640, 480);
@ -227,6 +227,109 @@ std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projectionMatricesFrom
return projection_matrices;
}

/** Create a pinhole calibration from a different Cal3 object, removing
* distortion.
*
* @tparam CALIBRATION Original calibration object.
* @param cal Input calibration object.
* @return Cal3_S2 with only the pinhole elements of cal.
*/
template <class CALIBRATION>
Cal3_S2 createPinholeCalibration(const CALIBRATION& cal) {
const auto& K = cal.K();
return Cal3_S2(K(0, 0), K(1, 1), K(0, 1), K(0, 2), K(1, 2));
}

/** Internal undistortMeasurement to be used by undistortMeasurement and
* undistortMeasurements */
template <class CALIBRATION, class MEASUREMENT>
MEASUREMENT undistortMeasurementInternal(
const CALIBRATION& cal, const MEASUREMENT& measurement,
boost::optional<Cal3_S2> pinholeCal = boost::none) {
if (!pinholeCal) {
pinholeCal = createPinholeCalibration(cal);
}
return pinholeCal->uncalibrate(cal.calibrate(measurement));
}
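The helper above performs a calibrate/uncalibrate round trip: cal.calibrate() maps a distorted pixel to normalized image coordinates with the distortion removed, and the pinhole-only Cal3_S2 then maps those coordinates back to pixels using the same K matrix. A hedged usage sketch follows; the calibration values mirror the tests above, and the wrapper function itself is illustrative, not part of this diff:

#include <gtsam/geometry/Cal3DS2.h>
#include <gtsam/geometry/Cal3_S2.h>

// Sketch: undistort one Cal3DS2 measurement so it behaves like a pinhole pixel.
gtsam::Point2 undistortOne(const gtsam::Point2& distortedPixel) {
  using namespace gtsam;
  Cal3DS2 cal(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003);
  // calibrate(): pixel -> normalized coordinates, distortion removed;
  // uncalibrate() with the pinhole-only K: normalized coordinates -> pixel.
  Cal3_S2 pinhole = createPinholeCalibration(cal);
  return pinhole.uncalibrate(cal.calibrate(distortedPixel));
}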
/** Remove distortion for measurements so as if the measurements came from a
* pinhole camera.
*
* Removes distortion but maintains the K matrix of the initial cal. Operates by
* calibrating using full calibration and uncalibrating with only the pinhole
* component of the calibration.
* @tparam CALIBRATION Calibration type to use.
* @param cal Calibration with which measurements were taken.
* @param measurements Vector of measurements to undistort.
* @return measurements with the effect of the distortion of sharedCal removed.
*/
template <class CALIBRATION>
Point2Vector undistortMeasurements(const CALIBRATION& cal,
const Point2Vector& measurements) {
Cal3_S2 pinholeCalibration = createPinholeCalibration(cal);
Point2Vector undistortedMeasurements;
// Calibrate with cal and uncalibrate with pinhole version of cal so that
// measurements are undistorted.
std::transform(measurements.begin(), measurements.end(),
std::back_inserter(undistortedMeasurements),
[&cal, &pinholeCalibration](const Point2& measurement) {
return undistortMeasurementInternal<CALIBRATION>(
cal, measurement, pinholeCalibration);
});
return undistortedMeasurements;
}

/** Specialization for Cal3_S2 as it doesn't need to be undistorted. */
template <>
inline Point2Vector undistortMeasurements(const Cal3_S2& cal,
const Point2Vector& measurements) {
return measurements;
}

/** Remove distortion for measurements so as if the measurements came from a
* pinhole camera.
*
* Removes distortion but maintains the K matrix of the initial calibrations.
* Operates by calibrating using full calibration and uncalibrating with only
* the pinhole component of the calibration.
* @tparam CAMERA Camera type to use.
* @param cameras Cameras corresponding to each measurement.
* @param measurements Vector of measurements to undistort.
* @return measurements with the effect of the distortion of the camera removed.
*/
template <class CAMERA>
typename CAMERA::MeasurementVector undistortMeasurements(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements) {
const size_t num_meas = cameras.size();
assert(num_meas == measurements.size());
typename CAMERA::MeasurementVector undistortedMeasurements(num_meas);
for (size_t ii = 0; ii < num_meas; ++ii) {
// Calibrate with cal and uncalibrate with pinhole version of cal so that
// measurements are undistorted.
undistortedMeasurements[ii] =
undistortMeasurementInternal<typename CAMERA::CalibrationType>(
cameras[ii].calibration(), measurements[ii]);
}
return undistortedMeasurements;
}

/** Specialize for Cal3_S2 to do nothing. */
template <class CAMERA = PinholeCamera<Cal3_S2>>
inline PinholeCamera<Cal3_S2>::MeasurementVector undistortMeasurements(
const CameraSet<PinholeCamera<Cal3_S2>>& cameras,
const PinholeCamera<Cal3_S2>::MeasurementVector& measurements) {
return measurements;
}

/** Specialize for SphericalCamera to do nothing. */
template <class CAMERA = SphericalCamera>
inline SphericalCamera::MeasurementVector undistortMeasurements(
const CameraSet<SphericalCamera>& cameras,
const SphericalCamera::MeasurementVector& measurements) {
return measurements;
}

/**
* Function to triangulate 3D landmark point from an arbitrary number
* of poses (at least 2) using the DLT. The function checks that the

@ -253,8 +356,13 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);

// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);

// Triangulate linearly
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);

// Then refine using non-linear optimization
if (optimize)
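For context, triangulateDLT solves the standard direct linear transform: each measurement (u, v) with 3x4 projection matrix P contributes the rows u*P.row(2) - P.row(0) and v*P.row(2) - P.row(1) to a stacked matrix A, and the homogeneous point is the right singular vector of A belonging to the smallest singular value. A self-contained Eigen sketch of that construction follows; it is not GTSAM's actual implementation, which also applies the rank_tol check and lives elsewhere in the library:

#include <Eigen/Dense>
#include <vector>

// Minimal DLT triangulation sketch: projection matrices P_i (3x4) and
// measurements (u_i, v_i) -> 3D point.
Eigen::Vector3d triangulateDLTSketch(
    const std::vector<Eigen::Matrix<double, 3, 4>>& Ps,
    const std::vector<Eigen::Vector2d>& zs) {
  Eigen::MatrixXd A(2 * Ps.size(), 4);
  for (size_t i = 0; i < Ps.size(); ++i) {
    A.row(2 * i) = zs[i].x() * Ps[i].row(2) - Ps[i].row(0);
    A.row(2 * i + 1) = zs[i].y() * Ps[i].row(2) - Ps[i].row(1);
  }
  // Null vector of A: the right singular vector for the smallest singular value.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullV);
  Eigen::Vector4d X = svd.matrixV().col(3);
  return X.head<3>() / X(3);  // dehomogenize
}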
@ -300,7 +408,13 @@ Point3 triangulatePoint3(

// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromCameras(cameras);
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);

// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CAMERA>(cameras, measurements);

Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);

// Then refine using non-linear optimization
if (optimize)
@ -95,7 +95,7 @@ namespace gtsam {
/* ************************************************************************* */
template <class CLIQUE>
void BayesTree<CLIQUE>::dot(std::ostream& s, sharedClique clique,
const KeyFormatter& indexFormatter,
const KeyFormatter& keyFormatter,
int parentnum) const {
static int num = 0;
bool first = true;

@ -104,10 +104,10 @@ namespace gtsam {
std::string parent = out.str();
parent += "[label=\"";

for (Key index : clique->conditional_->frontals()) {
if (!first) parent += ",";
for (Key key : clique->conditional_->frontals()) {
if (!first) parent += ", ";
first = false;
parent += indexFormatter(index);
parent += keyFormatter(key);
}

if (clique->parent()) {

@ -116,10 +116,10 @@ namespace gtsam {
}

first = true;
for (Key sep : clique->conditional_->parents()) {
if (!first) parent += ",";
for (Key parentKey : clique->conditional_->parents()) {
if (!first) parent += ", ";
first = false;
parent += indexFormatter(sep);
parent += keyFormatter(parentKey);
}
parent += "\"];\n";
s << parent;

@ -127,7 +127,7 @@ namespace gtsam {

for (sharedClique c : clique->children) {
num++;
dot(s, c, indexFormatter, parentnum);
dot(s, c, keyFormatter, parentnum);
}
}

@ -139,7 +139,7 @@ namespace gtsam {
return nodes_.empty();
}

/** return nodes */
/** Return nodes. Each node is a clique of variables obtained after elimination. */
const Nodes& nodes() const { return nodes_; }

/** Access node by variable */
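The change above switches the Graphviz label generation from the old indexFormatter naming to the keyFormatter used elsewhere in the library, and adds a space after each comma. A KeyFormatter is essentially a callable from Key to std::string, so callers can control how clique labels are rendered; the formatter below is a hypothetical example, not part of this diff:

#include <string>
#include <gtsam/inference/Symbol.h>

// Hypothetical custom formatter that prints symbols such as x1 or l2.
auto myFormatter = [](gtsam::Key key) {
  const gtsam::Symbol sym(key);
  return std::string(1, static_cast<char>(sym.chr())) + std::to_string(sym.index());
};
// Passed wherever a KeyFormatter is expected, a clique with frontals x1, x2 and
// parent x3 would then be labeled roughly as "x1, x2 : x3" in the .dot output.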
@ -15,6 +15,10 @@
#include <gtsam/base/timing.h>
#include <gtsam/base/treeTraversal-inst.h>

#ifdef GTSAM_USE_TBB
#include <mutex>
#endif

namespace gtsam {

/* ************************************************************************* */

@ -120,12 +124,25 @@ struct EliminationData {
size_t myIndexInParent;
FastVector<sharedFactor> childFactors;
boost::shared_ptr<BTNode> bayesTreeNode;
#ifdef GTSAM_USE_TBB
boost::shared_ptr<std::mutex> writeLock;
#endif

EliminationData(EliminationData* _parentData, size_t nChildren) :
parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>()) {
parentData(_parentData), bayesTreeNode(boost::make_shared<BTNode>())
#ifdef GTSAM_USE_TBB
, writeLock(boost::make_shared<std::mutex>())
#endif
{
if (parentData) {
#ifdef GTSAM_USE_TBB
parentData->writeLock->lock();
#endif
myIndexInParent = parentData->childFactors.size();
parentData->childFactors.push_back(sharedFactor());
#ifdef GTSAM_USE_TBB
parentData->writeLock->unlock();
#endif
} else {
myIndexInParent = 0;
}

@ -196,8 +213,15 @@ struct EliminationData {
nodesIndex_.insert(std::make_pair(j, myData.bayesTreeNode));

// Store remaining factor in parent's gathered factors
if (!eliminationResult.second->empty())
if (!eliminationResult.second->empty()) {
#ifdef GTSAM_USE_TBB
myData.parentData->writeLock->lock();
#endif
myData.parentData->childFactors[myData.myIndexInParent] = eliminationResult.second;
#ifdef GTSAM_USE_TBB
myData.parentData->writeLock->unlock();
#endif
}
}
};
};
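The writeLock introduced above protects the parent's childFactors vector while tree nodes are processed by concurrent TBB tasks; both the slot reservation in the constructor and the later write of the eliminated factor are wrapped in lock()/unlock() pairs. A scoped std::lock_guard achieves the same protection and releases the mutex even on early exit; the snippet below is a simplified, self-contained sketch of that pattern with generic types, not the commit's code:

#include <memory>
#include <mutex>
#include <vector>

// Sketch: several worker tasks append to a shared vector, so every access to
// it is wrapped in a scoped lock on a mutex shared via shared_ptr.
struct ParentData {
  std::vector<int> childFactors;
  std::shared_ptr<std::mutex> writeLock = std::make_shared<std::mutex>();
};

size_t appendChildSlot(ParentData& parent) {
  std::lock_guard<std::mutex> guard(*parent.writeLock);  // released on scope exit
  const size_t myIndexInParent = parent.childFactors.size();
  parent.childFactors.push_back(0);  // placeholder slot, filled in later
  return myIndexInParent;
}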
@ -25,11 +25,7 @@
#include <gtsam/3rdparty/CCOLAMD/Include/ccolamd.h>

#ifdef GTSAM_SUPPORT_NESTED_DISSECTION
#ifdef GTSAM_USE_SYSTEM_METIS
#include <metis.h>
#else
#include <gtsam/3rdparty/metis/include/metis.h>
#endif
#endif

using namespace std;