Merge remote-tracking branch 'svn/trunk' into NLO

Conflicts:
	.cproject
release/4.3a0
Richard Roberts 2012-03-31 01:02:45 +00:00
commit 46a1fb1103
88 changed files with 2432 additions and 2767 deletions

.cproject

@@ -21,7 +21,7 @@
<folderInfo id="cdt.managedbuild.toolchain.gnu.macosx.base.1359703544.2031210194" name="/" resourcePath="">
<toolChain id="cdt.managedbuild.toolchain.gnu.macosx.base.677243255" name="cdt.managedbuild.toolchain.gnu.macosx.base" superClass="cdt.managedbuild.toolchain.gnu.macosx.base">
<targetPlatform archList="all" binaryParser="org.eclipse.cdt.core.ELF;org.eclipse.cdt.core.MachO64" id="cdt.managedbuild.target.gnu.platform.macosx.base.752782918" name="Debug Platform" osList="macosx" superClass="cdt.managedbuild.target.gnu.platform.macosx.base"/>
<builder arguments="" buildPath="${ProjDirPath}/build" command="make" id="cdt.managedbuild.target.gnu.builder.macosx.base.319933862" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" parallelBuildOn="true" parallelizationNumber="7" superClass="cdt.managedbuild.target.gnu.builder.macosx.base"/>
<builder arguments="" buildPath="${ProjDirPath}/build" command="make" id="cdt.managedbuild.target.gnu.builder.macosx.base.319933862" keepEnvironmentInBuildfile="false" managedBuildOn="false" name="Gnu Make Builder" parallelBuildOn="true" parallelizationNumber="5" superClass="cdt.managedbuild.target.gnu.builder.macosx.base"/>
<tool id="cdt.managedbuild.tool.macosx.c.linker.macosx.base.457360678" name="MacOS X C Linker" superClass="cdt.managedbuild.tool.macosx.c.linker.macosx.base"/>
<tool id="cdt.managedbuild.tool.macosx.cpp.linker.macosx.base.1011140787" name="MacOS X C++ Linker" superClass="cdt.managedbuild.tool.macosx.cpp.linker.macosx.base">
<inputType id="cdt.managedbuild.tool.macosx.cpp.linker.input.1032375444" superClass="cdt.managedbuild.tool.macosx.cpp.linker.input">
@@ -285,7 +285,6 @@
</profile>
</scannerConfigBuildInfo>
</storageModule>
<storageModule moduleId="org.eclipse.cdt.internal.ui.text.commentOwnerProjectMappings"/>
<storageModule moduleId="org.eclipse.cdt.make.core.buildtargets">
<buildTargets>
<target name="check" path="build/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
@@ -746,38 +745,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.slam" path="build/gtsam/slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.slam</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.slam" path="build/gtsam/slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.slam</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.nonlinear" path="build/gtsam/nonlinear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.nonlinear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.nonlinear" path="build/gtsam/nonlinear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.nonlinear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="nonlinear.testValues.run" path="build/gtsam/nonlinear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
@@ -858,22 +825,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.inference" path="build/gtsam/inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.inference</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.inference" path="build/gtsam/inference" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.inference</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testVSLAMGraph" path="build/slam/tests" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments>
@@ -1442,14 +1393,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="geometry.check" path="build/gtsam/geometry" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>geometry.check</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="tests/testPoint3.run" path="build/gtsam/geometry" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments>
@@ -1522,22 +1465,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.geometry" path="build/gtsam/geometry" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.geometry</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.geometry" path="build/gtsam/geometry" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.geometry</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="all" path="slam" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments>
@@ -1679,78 +1606,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testDSFVector.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testDSFVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testTestableAssertions.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testTestableAssertions.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testVector.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testVector.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testMatrix.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testMatrix.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testNumericalDerivative.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testNumericalDerivative.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testBlockMatrices.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testBlockMatrices.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="base.testCholesky.run" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>base.testCholesky.run</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.base" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>check.base</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.base" path="build/gtsam/base" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.base</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="tests/testVectorValues.run" path="build/gtsam/linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2</buildArguments>
@@ -1823,22 +1678,6 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.linear" path="build/gtsam/linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.linear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.linear" path="build/gtsam/linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.linear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="linear.testSerializationLinear.run" path="build/gtsam/linear" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
@@ -2045,18 +1884,98 @@
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testGeneralSFMFactor.run" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<target name="check.base" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j7</buildArguments>
<buildTarget>testGeneralSFMFactor.run</buildTarget>
<buildArguments>-j5</buildArguments>
<buildTarget>check.base</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="testNonlinearOptimizer.run" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<target name="timing.base" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j7</buildArguments>
<buildTarget>testNonlinearOptimizer.run</buildTarget>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.base</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.geometry" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.geometry</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.geometry" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.geometry</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.inference" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.inference</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.inference" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.inference</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.linear" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.linear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.linear" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.linear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.nonlinear" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.nonlinear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.nonlinear" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.nonlinear</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="check.slam" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j2 VERBOSE=1</buildArguments>
<buildTarget>check.slam</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>false</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>
</target>
<target name="timing.slam" path="" targetID="org.eclipse.cdt.build.MakeTargetBuilder">
<buildCommand>make</buildCommand>
<buildArguments>-j5</buildArguments>
<buildTarget>timing.slam</buildTarget>
<stopOnError>true</stopOnError>
<useDefaultCommand>true</useDefaultCommand>
<runAllBuilders>true</runAllBuilders>


@@ -20,23 +20,25 @@ if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_BINARY_DIR})
endif()
# Default to Debug mode
if(NOT FIRST_PASS_DONE)
if(NOT FIRST_PASS_DONE AND NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Debug" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
"Choose the type of build, options are: None Debug Release Timing Profiling RelWithDebInfo."
FORCE)
endif()
# Check build types
if(${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_GREATER 2.8 OR ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} VERSION_EQUAL 2.8)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS Debug Release Timing Profiling RelWithDebInfo MinSizeRel)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS None Debug Release Timing Profiling RelWithDebInfo MinSizeRel)
endif()
string(TOLOWER "${CMAKE_BUILD_TYPE}" cmake_build_type_tolower)
if( NOT cmake_build_type_tolower STREQUAL "debug"
if( NOT cmake_build_type_tolower STREQUAL ""
AND NOT cmake_build_type_tolower STREQUAL "none"
AND NOT cmake_build_type_tolower STREQUAL "debug"
AND NOT cmake_build_type_tolower STREQUAL "release"
AND NOT cmake_build_type_tolower STREQUAL "timing"
AND NOT cmake_build_type_tolower STREQUAL "profiling"
AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are Debug, Release, Timing, Profiling, RelWithDebInfo (case-insensitive).")
message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are None, Debug, Release, Timing, Profiling, RelWithDebInfo (case-insensitive).")
endif()
# Add debugging flags but only on the first pass
@@ -75,6 +77,7 @@ endif(GTSAM_USE_QUATERNIONS)
# Avoid building non-installed exes and unit tests when installing
# FIXME: breaks generation and install of matlab toolbox
# FIXME: can't add install dependencies, so libraries never get built
#set(CMAKE_SKIP_INSTALL_ALL_DEPENDENCY TRUE)
# Pull in infrastructure


@@ -1,9 +1,9 @@
# Build/install CppUnitLite
FILE(GLOB cppunitlite_headers "*.h")
FILE(GLOB cppunitlite_src "*.cpp")
file(GLOB cppunitlite_headers "*.h")
file(GLOB cppunitlite_src "*.cpp")
ADD_LIBRARY(CppUnitLite STATIC ${cppunitlite_src})
add_library(CppUnitLite STATIC ${cppunitlite_src})
option(GTSAM_INSTALL_CPPUNITLITE "Enable/Disable installation of CppUnitLite library" ON)
if (GTSAM_INSTALL_CPPUNITLITE)

INSTALL

@@ -1,237 +0,0 @@
Installation Instructions
*************************
Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005,
2006 Free Software Foundation, Inc.
This file is free documentation; the Free Software Foundation gives
unlimited permission to copy, distribute and modify it.
Basic Installation
==================
IMPORTANT NOTE: GTSAM has some special compilation requirements, see
the README file.
Briefly, the shell commands `./configure; make; make install' should
configure, build, and install this package. The following
more-detailed instructions are generic; see the `README' file for
instructions specific to this package.
The `configure' shell script attempts to guess correct values for
various system-dependent variables used during compilation. It uses
those values to create a `Makefile' in each directory of the package.
It may also create one or more `.h' files containing system-dependent
definitions. Finally, it creates a shell script `config.status' that
you can run in the future to recreate the current configuration, and a
file `config.log' containing compiler output (useful mainly for
debugging `configure').
It can also use an optional file (typically called `config.cache'
and enabled with `--cache-file=config.cache' or simply `-C') that saves
the results of its tests to speed up reconfiguring. Caching is
disabled by default to prevent problems with accidental use of stale
cache files.
If you need to do unusual things to compile the package, please try
to figure out how `configure' could check whether to do them, and mail
diffs or instructions to the address given in the `README' so they can
be considered for the next release. If you are using the cache, and at
some point `config.cache' contains results you don't want to keep, you
may remove or edit it.
The file `configure.ac' (or `configure.in') is used to create
`configure' by a program called `autoconf'. You need `configure.ac' if
you want to change it or regenerate `configure' using a newer version
of `autoconf'.
The simplest way to compile this package is:
1. `cd' to the directory containing the package's source code and type
`./configure' to configure the package for your system.
Running `configure' might take a while. While running, it prints
some messages telling which features it is checking for.
2. Type `make' to compile the package.
3. Optionally, type `make check' to run any self-tests that come with
the package.
4. Type `make install' to install the programs and any data files and
documentation.
5. You can remove the program binaries and object files from the
source code directory by typing `make clean'. To also remove the
files that `configure' created (so you can compile the package for
a different kind of computer), type `make distclean'. There is
also a `make maintainer-clean' target, but that is intended mainly
for the package's developers. If you use it, you may have to get
all sorts of other programs in order to regenerate files that came
with the distribution.
Compilers and Options
=====================
Some systems require unusual options for compilation or linking that the
`configure' script does not know about. Run `./configure --help' for
details on some of the pertinent environment variables.
You can give `configure' initial values for configuration parameters
by setting variables in the command line or in the environment. Here
is an example:
./configure CC=c99 CFLAGS=-g LIBS=-lposix
*Note Defining Variables::, for more details.
Compiling For Multiple Architectures
====================================
You can compile the package for more than one kind of computer at the
same time, by placing the object files for each architecture in their
own directory. To do this, you can use GNU `make'. `cd' to the
directory where you want the object files and executables to go and run
the `configure' script. `configure' automatically checks for the
source code in the directory that `configure' is in and in `..'.
With a non-GNU `make', it is safer to compile the package for one
architecture at a time in the source code directory. After you have
installed the package for one architecture, use `make distclean' before
reconfiguring for another architecture.
Installation Names
==================
By default, `make install' installs the package's commands under
`/usr/local/bin', include files under `/usr/local/include', etc. You
can specify an installation prefix other than `/usr/local' by giving
`configure' the option `--prefix=PREFIX'.
You can specify separate installation prefixes for
architecture-specific files and architecture-independent files. If you
pass the option `--exec-prefix=PREFIX' to `configure', the package uses
PREFIX as the prefix for installing programs and libraries.
Documentation and other data files still use the regular prefix.
In addition, if you use an unusual directory layout you can give
options like `--bindir=DIR' to specify different values for particular
kinds of files. Run `configure --help' for a list of the directories
you can set and what kinds of files go in them.
If the package supports it, you can cause programs to be installed
with an extra prefix or suffix on their names by giving `configure' the
option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
Optional Features
=================
Some packages pay attention to `--enable-FEATURE' options to
`configure', where FEATURE indicates an optional part of the package.
They may also pay attention to `--with-PACKAGE' options, where PACKAGE
is something like `gnu-as' or `x' (for the X Window System). The
`README' should mention any `--enable-' and `--with-' options that the
package recognizes.
For packages that use the X Window System, `configure' can usually
find the X include and library files automatically, but if it doesn't,
you can use the `configure' options `--x-includes=DIR' and
`--x-libraries=DIR' to specify their locations.
Specifying the System Type
==========================
There may be some features `configure' cannot figure out automatically,
but needs to determine by the type of machine the package will run on.
Usually, assuming the package is built to be run on the _same_
architectures, `configure' can figure that out, but if it prints a
message saying it cannot guess the machine type, give it the
`--build=TYPE' option. TYPE can either be a short name for the system
type, such as `sun4', or a canonical name which has the form:
CPU-COMPANY-SYSTEM
where SYSTEM can have one of these forms:
OS KERNEL-OS
See the file `config.sub' for the possible values of each field. If
`config.sub' isn't included in this package, then this package doesn't
need to know the machine type.
If you are _building_ compiler tools for cross-compiling, you should
use the option `--target=TYPE' to select the type of system they will
produce code for.
If you want to _use_ a cross compiler, that generates code for a
platform different from the build platform, you should specify the
"host" platform (i.e., that on which the generated programs will
eventually be run) with `--host=TYPE'.
Sharing Defaults
================
If you want to set default values for `configure' scripts to share, you
can create a site shell script called `config.site' that gives default
values for variables like `CC', `cache_file', and `prefix'.
`configure' looks for `PREFIX/share/config.site' if it exists, then
`PREFIX/etc/config.site' if it exists. Or, you can set the
`CONFIG_SITE' environment variable to the location of the site script.
A warning: not all `configure' scripts look for a site script.
Defining Variables
==================
Variables not defined in a site shell script can be set in the
environment passed to `configure'. However, some packages may run
configure again during the build, and the customized values of these
variables may be lost. In order to avoid this problem, you should set
them in the `configure' command line, using `VAR=value'. For example:
./configure CC=/usr/local2/bin/gcc
causes the specified `gcc' to be used as the C compiler (unless it is
overridden in the site shell script).
Unfortunately, this technique does not work for `CONFIG_SHELL' due to
an Autoconf bug. Until the bug is fixed you can use this workaround:
CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
`configure' Invocation
======================
`configure' recognizes the following options to control how it operates.
`--help'
`-h'
Print a summary of the options to `configure', and exit.
`--version'
`-V'
Print the version of Autoconf used to generate the `configure'
script, and exit.
`--cache-file=FILE'
Enable the cache: use and save the results of the tests in FILE,
traditionally `config.cache'. FILE defaults to `/dev/null' to
disable caching.
`--config-cache'
`-C'
Alias for `--cache-file=config.cache'.
`--quiet'
`--silent'
`-q'
Do not print messages saying which checks are being made. To
suppress all normal output, redirect it to `/dev/null' (any error
messages will still be shown).
`--srcdir=DIR'
Look for the package's source code in directory DIR. Usually
`configure' can determine that directory automatically.
`configure' also accepts some other, not widely useful, options. Run
`configure --help' for more details.

README

@@ -13,33 +13,33 @@ Please see USAGE for an example on how to use GTSAM.
The code is organized according to the following directory structure:
3rdparty local copies of third party libraries - Eigen3 and CCOLAMD
base provides some base Math and data structures, as well as test-related utilities
geometry points, poses, tensors, etc
inference core graphical model inference such as factor graphs, junction trees, Bayes nets, Bayes trees
linear inference specialized to Gaussian linear case, GaussianFactorGraph etc...
nonlinear non-linear factor graphs and non-linear optimization
slam SLAM and visual SLAM application code
3rdparty local copies of third party libraries - Eigen3 and CCOLAMD
base provides some base Math and data structures, as well as test-related utilities
geometry points, poses, tensors, etc
inference core graphical model inference such as factor graphs, junction trees, Bayes nets, Bayes trees
linear inference specialized to Gaussian linear case, GaussianFactorGraph etc...
nonlinear non-linear factor graphs and non-linear optimization
slam SLAM and visual SLAM application code
This library contains unchanged copies of two third party libraries, with documentation
of licensing in LICENSE and as follows:
- CCOLAMD 2.73: Tim Davis' constrained column approximate minimum degree ordering library
- http://www.cise.ufl.edu/research/sparse
- Licenced under LGPL v2.1, provided in gtsam/3rdparty/CCOLAMD/Doc/lesser.txt
- Eigen 3.0.3: General C++ matrix and linear algebra library
- Eigen 3.0.5: General C++ matrix and linear algebra library
- Licenced under LGPL v3, provided in gtsam/3rdparty/Eigen/COPYING.LGPL
All of the above contain code and tests, and build local shared libraries that are then
bundled in a top-level shared library libgtsam.la. After this is built, you can also run
the more involved tests, which test the entire library:
All of the above contain code and tests, and produce a single library libgtsam.
After this is built, you can also run the more involved tests, which test the entire library:
tests more involved tests that depend on slam
examples Demo applications as a tutorial for using gtsam
tests more involved tests that depend on slam
examples Demo applications as a tutorial for using gtsam
cmake CMake scripts used within the library, as well as for finding GTSAM by dependent projects
Finally, there are some local libraries that are built and needed by the rest of the code:
CppUnitLite unit test library
m4 local M4 macros
CppUnitLite unit test library customized for use with gtsam
wrap code generation utility for the Matlab interface to gtsam
Important Installation Notes Specific to GTSAM
----------------------------------------------
@@ -47,8 +47,14 @@ Important Installation Notes Specific to GTSAM
1)
GTSAM requires the following libraries to be installed on your system:
- BOOST version 1.40 or greater (install through Linux repositories or MacPorts)
- Automake
- libtool
GTSAM uses CMake (http://www.cmake.org/) for build automation
- Installed cmake version must be 2.6 or higher
Tested compilers
- gcc
- clang
- OSX gcc
2)
GTSAM makes extensive use of debug assertions, even for checking input of
@@ -71,54 +77,115 @@ gtsam will need to be compiled with _GLIBCXX_DEBUG as well, due to the use of
header-only Eigen.
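As an illustration only (GTSAM does not mandate a particular mechanism), one way
to add this define when building your own code against GTSAM is through the
Debug-mode compiler flags in your project's CMakeLists.txt:

    # Illustrative sketch: append _GLIBCXX_DEBUG to the Debug C++ flags so that
    # code including GTSAM/Eigen headers is compiled consistently with the library.
    set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_GLIBCXX_DEBUG")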
3)
Putting the above together, here are some sample ./configure commands for
compiling gtsam:
GTSAM has been written to support the creation of API documentation using
doxygen. To create html documentation for GTSAM, run the script
makedoc.sh.
NOTE: If checked out from SVN, before attempting to compile run the command ./autogen.sh
4)
For developers, we primarily use the Eclipse IDE for development, and provide
an Eclipse project file with a variety of make targets to build and debug
from within Eclipse.
For Debugging (native Snow Leopard g++ compiler):
./configure CXXFLAGS="-fno-inline -g -Wall" \
LDFLAGS="-fno-inline -g -Wall"
5)
After installing the prerequisites for building GTSAM, you can configure and build
GTSAM with CMake using the default options by following the quickstart below. For
details on the full functionality of CMake, see the CMake documentation.
For Debugging (Linux or MacPorts g++ compilers):
./configure CXXFLAGS="-fno-inline -g -Wall -D_GLIBCXX_DEBUG" \
LDFLAGS="-fno-inline -g -Wall"
For Performance:
./configure CXXFLAGS="-DNDEBUG -O3" LDFLAGS="-O3"
After that (this would be for an in-source build, see next for out-of-source):
$ make
$ make check (optional, runs unit tests)
$ make install
Out-of-source build:
--------------------
The above will put object files and executables in the source directories. If you like, it is
very easy to configure the libraries to put all these in a parallel build tree so they do not
clutter the source tree. To do this, instead of running configure in the gtsam directory itself,
run it in a sub-directory of your choice, e.g., starting out in the main GTSAM folder:
- CMake Quickstart
Installs to the default system install path and builds all components. From a terminal,
starting in the root library folder, execute commands as follows for an out-of-source
build:
$] mkdir build
$] cd build
if you want to install:
$] ../configure ..... (command as above)
$] cmake ..
$] make check (optional, runs unit tests)
$] make install
or if you don't want to install, then you can:
This will build the library and unit tests, run all of the unit tests, and then install
the library itself, as well as the Matlab toolbox.
$] ../configure ..... (command as above)
$] ../configure --disable-shared
$] make (or make check)
- Additional CMake Options and Details
Built-in Unit Tests:
--------------------
There is one more optional step in which you can invoke the unit tests included in the gtsam libraries.
$] make check
By verifying all the test results are positive, you can make sure that the functionalities of the GTSAM
libraries are correct.
The cmake scripts force an out-of-source build, so inside gtsam,
create a new folder called "build", and run cmake. From the command line:
$] mkdir build
$] cd build
$] cmake ..
Note the ".." after the cmake command - it tells cmake to look for the
root CMakeLists.txt file in the root gtsam folder instead of in the build folder.
This is a necessary argument for starting cmake in all of its variations.
There are a few ways of actually doing the configuration to make adjusting options easier.
cmake the regular command-line version of cmake, allows configuration with scripts
ccmake the curses GUI for cmake, which lets you see the various options, change them, and run configuration.
cmake-gui a real GUI for cmake, which has a similar interface to ccmake, but with easier controls.
Note that during configuration, the settings get cached so if you rerun cmake later,
it will keep your previous settings. In particular, you can use the "cmake" build target
within the Eclipse project to update the configuration, which will be necessary
when file structures change.
While it is possible to use command-line arguments to cmake to change configuration
options, it is usually easier to use cmake-gui or ccmake to set parameters and use the other flags.
Important CMake configuration options:
CMAKE_INSTALL_PREFIX: this is the folder where installed files will go, and for
our development purposes, should be set to the home folder, like so
$] cmake -DCMAKE_INSTALL_PREFIX:PATH=$HOME ..
GTSAM_TOOLBOX_INSTALL_PATH: When the library is installed with "make install",
the generated matlab toolbox code (created by wrap) gets installed as well in
this path. For example, use "/home/username/borg/toolbox" to install the
toolbox in your borg/toolbox folder. The matlab toolbox will get installed
into borg/toolbox/gtsam.
$] cmake -DGTSAM_TOOLBOX_INSTALL_PATH:PATH=$HOME/borg/toolbox ..
GTSAM_BUILD_CONVENIENCE_LIBRARIES: This is a build option to allow for tests in
subfolders to be linked against convenience libraries rather than the full libgtsam.
Set with the command line as follows:
$] cmake -DGTSAM_BUILD_CONVENIENCE_LIBRARIES:OPTION=ON ..
ON (Default) This builds convenience libraries and links tests against them. This
option is suggested for gtsam developers, as it is possible to build
and run tests without first building the rest of the library, and
speeds up compilation for a single test. The downside of this option
is that it will build the entire library again to build the full
libgtsam library, so build/install will be slower.
OFF This will build all of libgtsam before any of the tests, and then
link all of the tests at once. This option is best for users of GTSAM,
as it avoids rebuilding the entirety of gtsam an extra time.
CMAKE_BUILD_TYPE: We support several build configurations for GTSAM (case insensitive)
Debug (default) All error checking options on, no optimization. Use for development.
Release Optimizations turned on, no debug symbols.
Timing Adds ENABLE_TIMING flag to provide statistics on operation
Profiling Standard configuration for use during profiling
RelWithDebInfo Same as Release, but with the -g flag for debug symbols
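As an alternative to passing each of these options on the command line, the same
settings can be collected in a CMake initial-cache file and loaded with cmake's -C
flag. The following is only a sketch: the file name settings.cmake is arbitrary and
the values shown are examples, not defaults.

    # settings.cmake -- illustrative initial-cache file, used as: cmake -C settings.cmake ..
    set(CMAKE_INSTALL_PREFIX "$ENV{HOME}" CACHE PATH "Install prefix")
    set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Build configuration")
    set(GTSAM_TOOLBOX_INSTALL_PATH "$ENV{HOME}/borg/toolbox" CACHE PATH "Matlab toolbox install location")
    set(GTSAM_BUILD_CONVENIENCE_LIBRARIES ON CACHE BOOL "Link tests against convenience libraries")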
Build and Install
After configuring, you use make just as you normally would, and the all, check and
install targets work as in autotools. Note that all targets are at the root level
of the build folder. You can also build any of the subfolders as individual
targets, such as "make geometry slam" to build both geometry and slam.
Running "make install" will install the library to the prefix location.
Check
As with autotools, "make check" will build and run all of the tests. You can also
run "make timing" to build all of the timing scripts. To run check on a particular
subsection, there is a convention of "make check.[subfolder]", so to run just the
geometry tests, run "make check.geometry". Individual tests can be run by
appending ".run" to the name of the test, for example, to run testMatrix,
run "make testMatrix.run".
The make target "wrap" will build the wrap binary, and the "wrap_gtsam" target will
generate code for the toolbox. By default, the toolbox will be created and installed
by the install target for the library. To change the install folder for the toolbox,
set GTSAM_TOOLBOX_INSTALL_PATH to a different value when configuring with cmake.

USAGE

@@ -20,6 +20,9 @@ Compiling/Linking with gtsam:
the library has also been designed to make use of XML serialization through
the Boost.serialization library, which requires the Boost.serialization
headers and binaries to be linked.
If you use CMake for your project, you can use the CMake scripts in the
cmake folder for finding GTSAM, CppUnitLite, and Wrap.
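For example, a downstream project's CMakeLists.txt might look roughly like the
sketch below. This is illustrative only: it assumes the Find modules are visible on
CMAKE_MODULE_PATH, and the target name myApp and the module path are placeholders.

    cmake_minimum_required(VERSION 2.6)
    project(myApp)

    # Point CMake at the Find*.cmake scripts shipped in gtsam's cmake folder (adjust the path)
    list(APPEND CMAKE_MODULE_PATH "/path/to/gtsam/cmake")

    find_package(GTSAM REQUIRED)      # defines GTSAM_INCLUDE_DIR and GTSAM_LIBS
    find_package(Boost REQUIRED COMPONENTS serialization)

    include_directories(${GTSAM_INCLUDE_DIR} ${Boost_INCLUDE_DIRS})
    add_executable(myApp main.cpp)
    target_link_libraries(myApp ${GTSAM_LIBS} ${Boost_LIBRARIES})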
Examples:
To see how the library works, examine the unit tests provided.
@@ -27,7 +30,7 @@ Examples:
Overview
---------------------------------------------------
The gtsam library has three primary components necessary for the construction
The GTSAM library has three primary components necessary for the construction
of factor graph representation and optimization which users will need to
adapt to their particular problem.
@@ -48,19 +51,20 @@ Factors:
VSLAM Example
---------------------------------------------------
The visual slam example shows a full implementation of a slam system. The example contains
derived versions of NonlinearFactor, NonlinearFactorGraph, and a Config, in classes
VSLAMFactor, VSLAMGraph, and VSLAMConfig, respectively.
derived versions of NonlinearFactor and NonlinearFactorGraph, in the classes visualSLAM::ProjectionFactor
and visualSLAM::Graph, respectively. The values for the system are stored in the generic
Values structure. For definitions and interface, see gtsam/slam/visualSLAM.h.
The clearest example of the use of the graph to find a solution is in
testVSLAMGraph. The basic process for using graphs is as follows (and can be seen in
testVSLAM. The basic process for using graphs is as follows (and can be seen in
the test):
- Create a NonlinearFactorGraph object (VSLAMGraph)
- Add factors to the graph (note the use of Boost.shared_ptr here) (VSLAMFactor)
- Create an initial configuration (VSLAMConfig)
- Create a NonlinearFactorGraph object (visualSLAM::Graph)
- Add factors to the graph (note the use of Boost.shared_ptr here) (visualSLAM::ProjectionFactor)
- Create an initial configuration (Values)
- Create an elimination ordering of variables (this must include all variables)
- Create and initialize a NonlinearOptimizer object (Note that this is a generic
algorithm that does not need to be derived for a particular problem)
- Call optimization functions with the optimizer to optimize the graph
- Extract an updated configuration from the optimizer
- Extract the updated values from the optimizer


@@ -1,30 +0,0 @@
# This is FindCppUnitLite.cmake
# CMake module to locate the CppUnit package
# The following variables will be defined:
#
# CppUnitLite_FOUND : TRUE if the package has been successfully found
# CppUnitLite_INCLUDE_DIR : paths to CppUnitLite's INCLUDE directories
# CppUnitLite_LIBS : paths to CppUnitLite's libraries
# Find include dirs
find_path(_CppUnitLite_INCLUDE_DIR CppUnitLite/Test.h
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
# Find libraries
find_library(_CppUnitLite_LIB NAMES CppUnitLite
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
set (CppUnitLite_INCLUDE_DIR ${_CppUnitLite_INCLUDE_DIR} CACHE STRING "CppUnitLite INCLUDE directories")
set (CppUnitLite_LIBS ${_CppUnitLite_LIB} CACHE STRING "CppUnitLite libraries")
# handle the QUIETLY and REQUIRED arguments and set CppUnitLite_FOUND to TRUE
# if all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(CppUnitLite DEFAULT_MSG
_CppUnitLite_INCLUDE_DIR _CppUnitLite_LIB)
mark_as_advanced(_CppUnitLite_INCLUDE_DIR _CppUnitLite_LIB )


@@ -1,30 +0,0 @@
# This is FindGTSAM.cmake
# CMake module to locate the GTSAM package
# The following variables will be defined:
#
# GTSAM_FOUND : TRUE if the package has been successfully found
# GTSAM_INCLUDE_DIR : paths to GTSAM's INCLUDE directories
# GTSAM_LIBS : paths to GTSAM's libraries
# Find include dirs
find_path(_gtsam_INCLUDE_DIR gtsam/inference/FactorGraph.h
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
# Find libraries
find_library(_gtsam_LIB NAMES gtsam
HINTS ${CMAKE_INSTALL_PREFIX}/lib "$ENV{HOME}/lib" /usr/local/lib /usr/lib)
set (GTSAM_INCLUDE_DIR ${_gtsam_INCLUDE_DIR} CACHE STRING "GTSAM INCLUDE directories")
set (GTSAM_LIBS ${_gtsam_LIB} CACHE STRING "GTSAM libraries")
# handle the QUIETLY and REQUIRED arguments and set GTSAM_FOUND to TRUE
# if all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GTSAM DEFAULT_MSG
_gtsam_INCLUDE_DIR _gtsam_LIB)
mark_as_advanced(_gtsam_INCLUDE_DIR _gtsam_LIB )


@@ -1,30 +0,0 @@
# This is FindWrap.cmake
# CMake module to locate the Wrap tool and header after installation package
# The following variables will be defined:
#
# Wrap_FOUND : TRUE if the package has been successfully found
# Wrap_CMD : command for executing wrap
# Wrap_INCLUDE_DIR : paths to Wrap's INCLUDE directories
# Find include dir
find_path(_Wrap_INCLUDE_DIR wrap/matlab.h
PATHS ${CMAKE_INSTALL_PREFIX}/include "$ENV{HOME}/include" /usr/local/include /usr/include )
# Find the installed executable
find_program(_Wrap_CMD NAMES wrap
PATHS ${CMAKE_INSTALL_PREFIX}/bin "$ENV{HOME}/bin" /usr/local/bin /usr/bin )
set (Wrap_INCLUDE_DIR ${_Wrap_INCLUDE_DIR} CACHE STRING "Wrap INCLUDE directories")
set (Wrap_CMD ${_Wrap_CMD} CACHE STRING "Wrap executable location")
# handle the QUIETLY and REQUIRED arguments and set Wrap_FOUND to TRUE
# if all listed variables are TRUE
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Wrap DEFAULT_MSG
_Wrap_INCLUDE_DIR _Wrap_CMD)
mark_as_advanced(_Wrap_INCLUDE_DIR _Wrap_CMD )


@@ -1,48 +0,0 @@
# Macros for using wrap functionality
macro(find_mexextension)
## Determine the mex extension
# Apple Macintosh (64-bit) mexmaci64
# Linux (32-bit) mexglx
# Linux (64-bit) mexa64
# Microsoft Windows (32-bit) mexw32
# Windows (64-bit) mexw64
# only support 64-bit apple
if(CMAKE_HOST_APPLE)
set(GTSAM_MEX_BIN_EXTENSION_default mexmaci64)
endif(CMAKE_HOST_APPLE)
if(NOT CMAKE_HOST_APPLE)
# check 64 bit
if( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
set( HAVE_64_BIT 0 )
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 4 )
if( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
set( HAVE_64_BIT 1 )
endif( ${CMAKE_SIZEOF_VOID_P} EQUAL 8 )
# Check for linux machines
if (CMAKE_HOST_UNIX)
if (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexa64)
else (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexglx)
endif (HAVE_64_BIT)
endif(CMAKE_HOST_UNIX)
# Check for windows machines
if (CMAKE_HOST_WIN32)
if (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexw64)
else (HAVE_64_BIT)
set(GTSAM_MEX_BIN_EXTENSION_default mexw32)
endif (HAVE_64_BIT)
endif(CMAKE_HOST_WIN32)
endif(NOT CMAKE_HOST_APPLE)
# Allow for setting mex extension manually
set(GTSAM_MEX_BIN_EXTENSION ${GTSAM_MEX_BIN_EXTENSION_default} CACHE DOCSTRING "Extension for matlab mex files")
message(STATUS "Detected Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION_default}")
message(STATUS "Current Matlab mex extension: ${GTSAM_MEX_BIN_EXTENSION}")
endmacro(find_mexextension)


@@ -1,10 +0,0 @@
# print configuration variables
# Usage:
#print_config_flag(${GTSAM_BUILD_TESTS} "Build Tests ")
macro(print_config_flag flag msg)
if ("${flag}" STREQUAL "ON")
message(STATUS " ${msg}: Enabled")
else ("${flag}" STREQUAL "ON")
message(STATUS " ${msg}: Disabled")
endif ("${flag}" STREQUAL "ON")
endmacro(print_config_flag)


@@ -1,65 +0,0 @@
# Build macros for using tests
# Collects all tests in an adjacent tests folder and builds them
macro(gtsam_add_tests subdir libs)
add_custom_target(check.${subdir} COMMAND ${CMAKE_CTEST_COMMAND})
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})
add_dependencies(check ${test_bin})
add_test(${test_base} ${EXECUTABLE_OUTPUT_PATH}${test_bin} )
if (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
add_dependencies(${test_bin} ${libs} CppUnitLite)
target_link_libraries(${test_bin} ${libs} ${Boost_LIBRARIES} CppUnitLite)
else()
add_dependencies(${test_bin} gtsam-static)
target_link_libraries(${test_bin} ${Boost_LIBRARIES} gtsam-static CppUnitLite)
endif()
add_custom_target(${test_bin}.run ${EXECUTABLE_OUTPUT_PATH}${test_bin} ${ARGN})
endforeach(test_src)
endmacro()
# Collects all tests in an adjacent tests folder and builds them
# This version forces the use of libs, as necessary for wrap/tests
macro(gtsam_add_external_tests subdir libs)
add_custom_target(check.${subdir} COMMAND ${CMAKE_CTEST_COMMAND})
file(GLOB tests_srcs "tests/test*.cpp")
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.${subdir} ${test_bin})
add_dependencies(check ${test_bin})
add_test(${test_base} ${EXECUTABLE_OUTPUT_PATH}${test_bin} )
add_dependencies(${test_bin} ${libs} CppUnitLite)
target_link_libraries(${test_bin} ${libs} ${Boost_LIBRARIES} CppUnitLite)
add_custom_target(${test_bin}.run ${EXECUTABLE_OUTPUT_PATH}${test_bin} ${ARGN})
endforeach(test_src)
endmacro()
# Collects timing scripts and builds them
macro(gtsam_add_timing subdir libs)
add_custom_target(timing.${subdir})
file(GLOB base_timing_srcs "tests/time*.cpp")
foreach(time_src ${base_timing_srcs})
get_filename_component(time_base ${time_src} NAME_WE)
set( time_bin ${time_base} )
message(STATUS "Adding Timing Benchmark ${time_bin}")
add_executable(${time_bin} ${time_src})
add_dependencies(timing.${subdir} ${time_bin})
add_dependencies(timing ${time_bin})
if (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
add_dependencies(${time_bin} ${libs})
target_link_libraries(${time_bin} ${libs} ${Boost_LIBRARIES} CppUnitLite)
else()
add_dependencies(${time_bin} gtsam-static)
target_link_libraries(${time_bin} ${Boost_LIBRARIES} gtsam-static CppUnitLite)
endif()
add_custom_target(${time_bin}.run ${EXECUTABLE_OUTPUT_PATH}${time_bin} ${ARGN})
endforeach(time_src)
endmacro()


@@ -7,13 +7,19 @@ set(base_local_libs
base
)
# Files to exclude from compilation of tests and timing scripts
set(base_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(base "${base_local_libs}")
gtsam_add_subdir_tests(base "${base_local_libs}" "gtsam-static" "${base_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(base "${base_local_libs}")
gtsam_add_subdir_timing(base "${base_local_libs}" "gtsam-static" "${base_excluded_files}")
endif(GTSAM_BUILD_TIMING)


@@ -8,13 +8,19 @@ set(geometry_local_libs
geometry
)
# Files to exclude from compilation of tests and timing scripts
set(geometry_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(geometry "${geometry_local_libs}")
gtsam_add_subdir_tests(geometry "${geometry_local_libs}" "gtsam-static" "${geometry_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(geometry "${geometry_local_libs}")
gtsam_add_subdir_timing(geometry "${geometry_local_libs}" "gtsam-static" "${geometry_excluded_files}")
endif(GTSAM_BUILD_TIMING)


@@ -1,272 +0,0 @@
///**
// * @file CalibratedCameraT.h
// * @date Mar 5, 2011
// * @author Yong-Dian Jian
// * @brief calibrated camera template
// */
//
//#pragma once
//
//#include <boost/optional.hpp>
//#include <gtsam/geometry/Pose2.h>
//#include <gtsam/geometry/Pose3.h>
//
//namespace gtsam {
//
// /**
// * A Calibrated camera class [R|-R't], calibration K.
// * If calibration is known, it is more computationally efficient
// * to calibrate the measurements rather than try to predict in pixels.
// * AGC: Is this used or tested anywhere?
// * AGC: If this is a "CalibratedCamera," why is there a calibration stored internally?
// * @ingroup geometry
// * \nosubgrouping
// */
// template <typename Calibration>
// class CalibratedCameraT {
// private:
// Pose3 pose_; // 6DOF pose
// Calibration k_;
//
// public:
//
// /// @name Standard Constructors
// /// @{
//
// ///TODO: comment
// CalibratedCameraT() {}
//
// ///TODO: comment
// CalibratedCameraT(const Pose3& pose):pose_(pose){}
//
// ///TODO: comment
// CalibratedCameraT(const Pose3& pose, const Calibration& k):pose_(pose),k_(k) {}
//
// /// @}
// /// @name Advanced Constructors
// /// @{
//
// ///TODO: comment
// CalibratedCameraT(const Vector &v): pose_(Pose3::Expmap(v)) {}
//
// ///TODO: comment
// CalibratedCameraT(const Vector &v, const Vector &k):pose_(Pose3::Expmap(v)),k_(k){}
//
// /// @}
// /// @name Standard Interface
// /// @{
//
// virtual ~CalibratedCameraT() {}
//
// ///TODO: comment
// inline Pose3& pose() { return pose_; }
//
// ///TODO: comment
// inline const Pose3& pose() const { return pose_; }
//
// ///TODO: comment
// inline Calibration& calibration() { return k_; }
//
// ///TODO: comment
// inline const Calibration& calibration() const { return k_; }
//
// ///TODO: comment
// inline const CalibratedCameraT compose(const CalibratedCameraT &c) const {
// return CalibratedCameraT( pose_ * c.pose(), k_ ) ;
// }
//
// ///TODO: comment
// inline const CalibratedCameraT inverse() const {
// return CalibratedCameraT( pose_.inverse(), k_ ) ;
// }
//
// /// @}
// /// @name Testable
// /// @{
//
// /// assert equality up to a tolerance
// bool equals (const CalibratedCameraT &camera, double tol = 1e-9) const {
// return pose_.equals(camera.pose(), tol) && k_.equals(camera.calibration(), tol) ;
// }
//
// /// print
// void print(const std::string& s = "") const {
// pose_.print("pose3");
// k_.print("calibration");
// }
//
// /// @}
// /// @name Manifold
// /// @{
//
// ///TODO: comment
// CalibratedCameraT retract(const Vector& d) const {
// return CalibratedCameraT(pose().retract(d), k_) ;
// }
//
// ///TODO: comment
// Vector localCoordinates(const CalibratedCameraT& T2) const {
// return pose().localCoordinates(T2.pose()) ;
// }
//
// ///TODO: comment
// inline size_t dim() const { return 6 ; } //TODO: add final dimension variable?
//
// ///TODO: comment
// inline static size_t Dim() { return 6 ; } //TODO: add final dimension variable?
//
// //TODO: remove comment and method?
// /**
// * Create a level camera at the given 2D pose and height
// * @param pose2 specifies the location and viewing direction
// * (theta 0 = looking in direction of positive X axis)
// */
// // static CalibratedCameraT level(const Pose2& pose2, double height);
//
// /* ************************************************************************* */
// // measurement functions and derivatives
// /* ************************************************************************* */
//
// /**
// * This function receives the camera pose and the landmark location and
// * returns the location the point is supposed to appear in the image
// * @param camera the CalibratedCameraT
// * @param point a 3D point to be projected
// * @return the intrinsic coordinates of the projected point
// */
//
// /// @}
// /// @name Transformations
// /// @{
//
// ///TODO: comment
// inline Point2 project(const Point3& point,
// boost::optional<Matrix&> D_intrinsic_pose = boost::none,
// boost::optional<Matrix&> D_intrinsic_point = boost::none) const {
// std::pair<Point2,bool> result = projectSafe(point, D_intrinsic_pose, D_intrinsic_point) ;
// return result.first ;
// }
//
// ///TODO: comment
// std::pair<Point2,bool> projectSafe(
// const Point3& pw,
// boost::optional<Matrix&> D_intrinsic_pose = boost::none,
// boost::optional<Matrix&> D_intrinsic_point = boost::none) const {
//
// if ( !D_intrinsic_pose && !D_intrinsic_point ) {
// Point3 pc = pose_.transform_to(pw) ;
// Point2 pn = project_to_camera(pc) ;
// return std::make_pair(k_.uncalibrate(pn), pc.z() > 0) ;
// }
// // world to camera coordinate
// Matrix Hc1 /* 3*6 */, Hc2 /* 3*3 */ ;
// Point3 pc = pose_.transform_to(pw, Hc1, Hc2) ;
// // camera to normalized image coordinate
// Matrix Hn; // 2*3
// Point2 pn = project_to_camera(pc, Hn) ;
// // uncalibration
// Matrix Hi; // 2*2
// Point2 pi = k_.uncalibrate(pn,boost::none,Hi);
// Matrix tmp = Hi*Hn ;
// if (D_intrinsic_pose) *D_intrinsic_pose = tmp * Hc1 ;
// if (D_intrinsic_point) *D_intrinsic_point = tmp * Hc2 ;
// return std::make_pair(pi, pc.z()>0) ;
// }
//
// /**
// * projects a 3-dimensional point in camera coordinates into the
// * camera and returns a 2-dimensional point, no calibration applied
// * With optional 2by3 derivative
// */
// static Point2 project_to_camera(const Point3& P,
// boost::optional<Matrix&> H1 = boost::none){
// if (H1) {
// double d = 1.0 / P.z(), d2 = d * d;
// *H1 = Matrix_(2, 3, d, 0.0, -P.x() * d2, 0.0, d, -P.y() * d2);
// }
// return Point2(P.x() / P.z(), P.y() / P.z());
// }
//
// /**
// * backproject a 2-dimensional point to a 3-dimension point
// */
// Point3 backproject_from_camera(const Point2& pi, const double scale) const {
// Point2 pn = k_.calibrate(pi);
// Point3 pc(pn.x()*scale, pn.y()*scale, scale);
// return pose_.transform_from(pc);
// }
//
//private:
//
// /// @}
// /// @name Advanced Interface
// /// @{
//
// /** Serialization function */
// friend class boost::serialization::access;
// template<class Archive>
// void serialize(Archive & ar, const unsigned int version) {
// ar & BOOST_SERIALIZATION_NVP(pose_);
// ar & BOOST_SERIALIZATION_NVP(k_);
// }
//
// /// @}
// };
//}
//
////TODO: remove?
//
//// static CalibratedCameraT Expmap(const Vector& v) {
//// return CalibratedCameraT(Pose3::Expmap(v), k_) ;
//// }
//// static Vector Logmap(const CalibratedCameraT& p) {
//// return Pose3::Logmap(p.pose()) ;
//// }
//
//// Point2 project(const Point3& point,
//// boost::optional<Matrix&> D_intrinsic_pose = boost::none,
//// boost::optional<Matrix&> D_intrinsic_point = boost::none) const {
////
//// // no derivative is necessary
//// if ( !D_intrinsic_pose && !D_intrinsic_point ) {
//// Point3 pc = pose_.transform_to(point) ;
//// Point2 pn = project_to_camera(pc) ;
//// return k_.uncalibrate(pn) ;
//// }
////
//// // world to camera coordinate
//// Matrix Hc1 /* 3*6 */, Hc2 /* 3*3 */ ;
//// Point3 pc = pose_.transform_to(point, Hc1, Hc2) ;
////
//// // camera to normalized image coordinate
//// Matrix Hn; // 2*3
//// Point2 pn = project_to_camera(pc, Hn) ;
////
//// // uncalibration
//// Matrix Hi; // 2*2
//// Point2 pi = k_.uncalibrate(pn,boost::none,Hi);
////
//// Matrix tmp = Hi*Hn ;
//// *D_intrinsic_pose = tmp * Hc1 ;
//// *D_intrinsic_point = tmp * Hc2 ;
//// return pi ;
//// }
////
//// std::pair<Point2,bool> projectSafe(
//// const Point3& pw,
//// boost::optional<Matrix&> H1 = boost::none,
//// boost::optional<Matrix&> H2 = boost::none) const {
//// Point3 pc = pose_.transform_to(pw);
//// return std::pair<Point2, bool>( project(pw,H1,H2), pc.z() > 0);
//// }
////
//// std::pair<Point2,bool> projectSafe(
//// const Point3& pw,
//// const Point3& pw_normal,
//// boost::optional<Matrix&> H1 = boost::none,
//// boost::optional<Matrix&> H2 = boost::none) const {
//// Point3 pc = pose_.transform_to(pw);
//// Point3 pc_normal = pose_.rotation().unrotate(pw_normal);
//// return std::pair<Point2, bool>( project(pw,H1,H2), (pc.z() > 0) && (pc_normal.z() < -0.5) );
//// }
//

View File

@ -1,261 +0,0 @@
///* ----------------------------------------------------------------------------
//
// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
// * Atlanta, Georgia 30332-0415
// * All Rights Reserved
// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
//
// * See LICENSE for the license information
//
// * -------------------------------------------------------------------------- */
//
///**
// * @file GeneralCameraT.h
// * @brief General camera template
// * @date Mar 1, 2010
// * @author Yong-Dian Jian
// */
//
//#pragma once
//
//#include <gtsam/geometry/CalibratedCamera.h>
//#include <gtsam/geometry/Cal3_S2.h>
//#include <gtsam/geometry/Cal3Bundler.h>
//#include <gtsam/geometry/Cal3DS2.h>
//
//namespace gtsam {
//
///**
// * General camera template
// * @ingroup geometry
// * \nosubgrouping
// */
//template <typename Camera, typename Calibration>
//class GeneralCameraT {
//
//private:
// Camera calibrated_; // Calibrated camera
// Calibration calibration_; // Calibration
//
//public:
//
// /// @name Standard Constructors
// /// @{
//
// ///TODO: comment
// GeneralCameraT(){}
//
// ///TODO: comment
// GeneralCameraT(const Camera& calibrated, const Calibration& K) : calibrated_(calibrated), calibration_(K) {}
//
// ///TODO: comment
// GeneralCameraT(const Camera& calibrated ) : calibrated_(calibrated) {}
//
// ///TODO: comment
// GeneralCameraT(const Pose3& pose, const Calibration& K) : calibrated_(pose), calibration_(K) {}
//
// ///TODO: comment
// GeneralCameraT(const Pose3& pose) : calibrated_(pose) {}
//
// /// @}
// /// @name Advanced Constructors
// /// @{
//
// ///TODO: comment
// GeneralCameraT(const Pose3& pose, const Vector &v) : calibrated_(pose), calibration_(v) {}
//
// ///TODO: comment
// GeneralCameraT(const Vector &v) :
// calibrated_(sub(v, 0, Camera::Dim())),
// calibration_(sub(v, Camera::Dim(), Camera::Dim() + Calibration::Dim() )) {}
//
// /// @}
// /// @name Standard Interface
// /// @{
//
// ///TODO: comment
// inline const Pose3& pose() const { return calibrated_.pose(); }
//
// ///TODO: comment
// inline const Camera& calibrated() const { return calibrated_; }
//
// ///TODO: comment
// inline const Calibration& calibration() const { return calibration_; }
//
// ///TODO: comment
// inline GeneralCameraT compose(const Pose3 &p) const {
// return GeneralCameraT( pose().compose(p), calibration_ ) ;
// }
//
// ///TODO: comment
// Matrix D2d_camera(const Point3& point) const {
// Point2 intrinsic = calibrated_.project(point);
// Matrix D_intrinsic_pose = Dproject_pose(calibrated_, point);
// Matrix D_2d_intrinsic = calibration_.D2d_intrinsic(intrinsic);
// Matrix D_2d_pose = D_2d_intrinsic * D_intrinsic_pose;
// Matrix D_2d_calibration = calibration_.D2d_calibration(intrinsic);
//
// const int n1 = calibrated_.dim() ;
// const int n2 = calibration_.dim() ;
// Matrix D(2,n1+n2) ;
//
// sub(D,0,2,0,n1) = D_2d_pose ;
// sub(D,0,2,n1,n1+n2) = D_2d_calibration ;
// return D;
// }
//
// ///TODO: comment
// Matrix D2d_3d(const Point3& point) const {
// Point2 intrinsic = calibrated_.project(point);
// Matrix D_intrinsic_3d = Dproject_point(calibrated_, point);
// Matrix D_2d_intrinsic = calibration_.D2d_intrinsic(intrinsic);
// return D_2d_intrinsic * D_intrinsic_3d;
// }
//
// ///TODO: comment
// Matrix D2d_camera_3d(const Point3& point) const {
// Point2 intrinsic = calibrated_.project(point);
// Matrix D_intrinsic_pose = Dproject_pose(calibrated_, point);
// Matrix D_2d_intrinsic = calibration_.D2d_intrinsic(intrinsic);
// Matrix D_2d_pose = D_2d_intrinsic * D_intrinsic_pose;
// Matrix D_2d_calibration = calibration_.D2d_calibration(intrinsic);
//
// Matrix D_intrinsic_3d = Dproject_point(calibrated_, point);
// Matrix D_2d_3d = D_2d_intrinsic * D_intrinsic_3d;
//
// const int n1 = calibrated_.dim() ;
// const int n2 = calibration_.dim() ;
//
// Matrix D(2,n1+n2+3) ;
//
// sub(D,0,2,0,n1) = D_2d_pose ;
// sub(D,0,2,n1,n1+n2) = D_2d_calibration ;
// sub(D,0,2,n1+n2,n1+n2+3) = D_2d_3d ;
// return D;
// }
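//
// // Layout note (illustrative): D is the 2x(n1+n2+3) Jacobian
// //   D = [ D_2d_pose | D_2d_calibration | D_2d_3d ]
// // where n1 is the dimension of the calibrated (pose) block and n2 that of the calibration.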
//
// /// @}
// /// @name Transformations
// /// @{
//
// ///TODO: comment
// std::pair<Point2,bool> projectSafe(
// const Point3& P,
// boost::optional<Matrix&> H1 = boost::none,
// boost::optional<Matrix&> H2 = boost::none) const {
//
// Point3 cameraPoint = calibrated_.pose().transform_to(P);
// return std::pair<Point2, bool>(
// project(P,H1,H2),
// cameraPoint.z() > 0);
// }
//
// ///TODO: comment
// Point3 backproject(const Point2& projection, const double scale) const {
// Point2 intrinsic = calibration_.calibrate(projection);
// Point3 cameraPoint = CalibratedCamera::backproject_from_camera(intrinsic, scale);
// return calibrated_.pose().transform_from(cameraPoint);
// }
//
// /**
// * project function that does not merge the Jacobians of calibration and pose
// */
// Point2 project(const Point3& P, Matrix& H1_pose, Matrix& H1_k, Matrix& H2_pt) const {
// Matrix tmp;
// Point2 intrinsic = calibrated_.project(P, H1_pose, H2_pt);
// Point2 projection = calibration_.uncalibrate(intrinsic, H1_k, tmp);
// H1_pose = tmp * H1_pose;
// H2_pt = tmp * H2_pt;
// return projection;
// }
//
// /**
// * project a 3d point to the camera
// * P is a point in camera coordinates
// * H1 is with respect to pose + calibration
// * H2 is with respect to the landmark
// */
// Point2 project(const Point3& P,
// boost::optional<Matrix&> H1 = boost::none,
// boost::optional<Matrix&> H2 = boost::none) const {
//
// if (!H1 && !H2) {
// Point2 intrinsic = calibrated_.project(P);
// return calibration_.uncalibrate(intrinsic) ;
// }
//
// Matrix H1_k, H1_pose, H2_pt;
// Point2 projection = project(P, H1_pose, H1_k, H2_pt);
// if ( H1 ) *H1 = collect(2, &H1_pose, &H1_k);
// if ( H2 ) *H2 = H2_pt;
//
// return projection;
// }
//
// /// @}
// /// @name Testable
// /// @{
//
// /// checks equality up to a tolerance
// bool equals (const GeneralCameraT &camera, double tol = 1e-9) const {
// return calibrated_.equals(camera.calibrated_, tol) &&
// calibration_.equals(camera.calibration_, tol) ;
// }
//
// /// print with optional string
// void print(const std::string& s = "") const {
// calibrated_.pose().print(s + ".camera.") ;
// calibration_.print(s + ".calibration.") ;
// }
//
// /// @}
// /// @name Manifold
// /// @{
//
// ///TODO: comment
// GeneralCameraT retract(const Vector &v) const {
// Vector v1 = sub(v,0,Camera::Dim());
// Vector v2 = sub(v,Camera::Dim(),Camera::Dim()+Calibration::Dim());
// return GeneralCameraT(calibrated_.retract(v1), calibration_.retract(v2));
// }
//
// ///TODO: comment
// Vector localCoordinates(const GeneralCameraT &C) const {
// const Vector v1(calibrated().localCoordinates(C.calibrated())),
// v2(calibration().localCoordinates(C.calibration()));
// return concatVectors(2,&v1,&v2) ;
// }
//
// //inline size_t dim() { return Camera::dim() + Calibration::dim() ; }
//
// ///TODO: comment
// inline size_t dim() const { return calibrated().dim() + calibration().dim() ; }
//
// ///TODO: comment
// static inline size_t Dim() { return Camera::Dim() + Calibration::Dim() ; }
//
//private:
//
// /// @}
// /// @name Advanced Interface
// /// @{
//
// friend class boost::serialization::access;
// template<class Archive>
//
// /// Serialization function
// void serialize(Archive & ar, const unsigned int version)
// {
// ar & BOOST_SERIALIZATION_NVP(calibrated_);
// ar & BOOST_SERIALIZATION_NVP(calibration_);
// }
//
// /// @}
//
//};
//
//typedef GeneralCameraT<CalibratedCamera,Cal3Bundler> Cal3BundlerCamera;
//typedef GeneralCameraT<CalibratedCamera,Cal3DS2> Cal3DS2Camera; // NOTE: Typedef not referenced in gtsam
//typedef GeneralCameraT<CalibratedCamera,Cal3_S2> Cal3_S2Camera; // NOTE: Typedef not referenced in gtsam
//
//}

View File

@ -1,72 +0,0 @@
///* ----------------------------------------------------------------------------
//
// * GTSAM Copyright 2010, Georgia Tech Research Corporation,
// * Atlanta, Georgia 30332-0415
// * All Rights Reserved
// * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
//
// * See LICENSE for the license information
//
// * -------------------------------------------------------------------------- */
//
///**
// * @file SimpleCamera.cpp
// * @date Aug 16, 2009
// * @author dellaert
// */
//
//#include <gtsam/geometry/SimpleCamera.h>
//#include <gtsam/geometry/CalibratedCamera.h>
//
//using namespace std;
//
//namespace gtsam {
//
// /* ************************************************************************* */
//
// SimpleCamera::SimpleCamera(const Cal3_S2& K,
// const CalibratedCamera& calibrated) :
// calibrated_(calibrated), K_(K) {
// }
//
// SimpleCamera::SimpleCamera(const Cal3_S2& K, const Pose3& pose) :
// calibrated_(pose), K_(K) {
// }
//
// SimpleCamera::~SimpleCamera() {
// }
//
// pair<Point2, bool> SimpleCamera::projectSafe(const Point3& P) const {
// Point3 cameraPoint = calibrated_.pose().transform_to(P);
// Point2 intrinsic = CalibratedCamera::project_to_camera(cameraPoint);
// Point2 projection = K_.uncalibrate(intrinsic);
// return pair<Point2, bool>(projection, cameraPoint.z() > 0);
// }
//
// Point2 SimpleCamera::project(const Point3& point,
// boost::optional<Matrix&> H1, boost::optional<Matrix&> H2) const {
//
// Point2 intrinsic = calibrated_.project(point, H1, H2);
// if (!H1 && !H2)
// return K_.uncalibrate(intrinsic);
// else {
// Matrix D_projection_intrinsic;
// Point2 projection = K_.uncalibrate(intrinsic, boost::none, D_projection_intrinsic);
// if (H1) *H1 = D_projection_intrinsic * (*H1);
// if (H2) *H2 = D_projection_intrinsic * (*H2);
// return projection;
// }
// }
//
// Point3 SimpleCamera::backproject(const Point2& projection, const double scale) const {
// Point2 intrinsic = K_.calibrate(projection);
// Point3 cameraPoint = CalibratedCamera::backproject_from_camera(intrinsic, scale);
// return calibrated_.pose().transform_from(cameraPoint);
// }
//
// SimpleCamera SimpleCamera::level(const Cal3_S2& K, const Pose2& pose2, double height) {
// return SimpleCamera(K, CalibratedCamera::level(pose2, height));
// }
//
///* ************************************************************************* */
//}

View File

@ -24,78 +24,3 @@
namespace gtsam {
typedef PinholeCamera<Cal3_S2> SimpleCamera;
}
//#include <gtsam/geometry/CalibratedCamera.h>
//#include <gtsam/geometry/Cal3_S2.h>
//
//namespace gtsam {
//
// /**
// * A simple camera class with a Cal3_S2 calibration
// * Basically takes a Calibrated camera and adds calibration information
// * to produce measurements in pixels.
// * Not a subclass, as a SimpleCamera *is not* a CalibratedCamera.
// */
// class SimpleCamera {
// private:
// CalibratedCamera calibrated_; // Calibrated camera
// Cal3_S2 K_; // Calibration
//
// public:
// SimpleCamera(const Cal3_S2& K, const CalibratedCamera& calibrated);
// SimpleCamera(const Cal3_S2& K, const Pose3& pose);
//
// /** constructor for serialization */
// SimpleCamera(){}
//
// virtual ~SimpleCamera();
//
// const Pose3& pose() const {
// return calibrated_.pose();
// }
//
// const Cal3_S2& calibration() const {
// return K_;
// }
//
// /**
// * project a 3d point to the camera and also check the depth
// */
// std::pair<Point2,bool> projectSafe(const Point3& P) const;
//
// /**
// * backproject a 2d point from the camera up to a given scale
// */
// Point3 backproject(const Point2& projection, const double scale) const;
//
// /**
// * Create a level camera at the given 2D pose and height
// * @param pose2 specifies the location and viewing direction
// * (theta 0 = looking in direction of positive X axis)
// */
// static SimpleCamera level(const Cal3_S2& K, const Pose2& pose2, double height);
//
// /**
// * This function receives the camera pose and the landmark location and
// * returns the location at which the point is expected to appear in the image
// */
// Point2 project(const Point3& point,
// boost::optional<Matrix&> H1 = boost::none,
// boost::optional<Matrix&> H2 = boost::none) const;
//
// bool equals(const SimpleCamera& X, double tol=1e-9) const {
// return calibrated_.equals(X.calibrated_, tol) && K_.equals(X.K_, tol);
// }
//
// private:
// /** Serialization function */
// friend class boost::serialization::access;
// template<class ARCHIVE>
// void serialize(ARCHIVE & ar, const unsigned int version) {
// ar & BOOST_SERIALIZATION_NVP(calibrated_);
// ar & BOOST_SERIALIZATION_NVP(K_);
// }
//
// };
//
//}

View File

@ -10,13 +10,19 @@ set(inference_local_libs
ccolamd
)
# Files to exclude from compilation of tests and timing scripts
set(inference_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(inference "${inference_local_libs}")
gtsam_add_subdir_tests(inference "${inference_local_libs}" "gtsam-static" "${inference_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(inference "${inference_local_libs}")
gtsam_add_subdir_timing(inference "${inference_local_libs}" "gtsam-static" "${inference_excluded_files}")
endif(GTSAM_BUILD_TIMING)

View File

@ -25,7 +25,8 @@
using namespace gtsam;
using namespace std;
struct EliminationTreeTester {
class EliminationTreeTester {
public:
// build hardcoded tree
static SymbolicEliminationTree::shared_ptr buildHardcodedTree(const SymbolicFactorGraph& fg) {

View File

@ -11,12 +11,18 @@ set(linear_local_libs
ccolamd
)
# Files to exclude from compilation of tests and timing scripts
set(linear_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(linear "${linear_local_libs}")
gtsam_add_subdir_tests(linear "${linear_local_libs}" "gtsam-static" "${linear_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(linear "${linear_local_libs}")
gtsam_add_subdir_timing(linear "${linear_local_libs}" "gtsam-static" "${linear_excluded_files}")
endif(GTSAM_BUILD_TIMING)

View File

@ -25,4 +25,17 @@
namespace gtsam {
/* ************************************************************************* */
namespace internal {
template<class BAYESTREE>
void optimizeInPlace(const typename BAYESTREE::sharedClique& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const typename BAYESTREE::sharedClique& child, clique->children_)
optimizeInPlace<BAYESTREE>(child, result);
}
}
}
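// Usage sketch (illustrative, assuming a solved GaussianBayesTree `bt`):
//   VectorValues x = *allocateVectorValues(bt);
//   internal::optimizeInPlace<GaussianBayesTree>(bt.root(), x); // recursive back-substitution from the root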

View File

@ -23,22 +23,15 @@
namespace gtsam {
/* ************************************************************************* */
namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& child, clique->children_)
optimizeInPlace(child, result);
}
VectorValues optimize(const GaussianBayesTree& bayesTree) {
VectorValues result = *allocateVectorValues(bayesTree);
optimizeInPlace(bayesTree, result);
return result;
}
/* ************************************************************************* */
VectorValues optimize(const GaussianBayesTree& bayesTree) {
VectorValues result = *allocateVectorValues(bayesTree);
internal::optimizeInPlace(bayesTree.root(), result);
return result;
void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) {
internal::optimizeInPlace<GaussianBayesTree>(bayesTree.root(), result);
}
/* ************************************************************************* */
@ -77,11 +70,6 @@ void optimizeGradientSearchInPlace(const GaussianBayesTree& Rd, VectorValues& gr
toc(4, "Compute point");
}
/* ************************************************************************* */
void optimizeInPlace(const GaussianBayesTree& bayesTree, VectorValues& result) {
internal::optimizeInPlace(bayesTree.root(), result);
}
/* ************************************************************************* */
VectorValues gradient(const GaussianBayesTree& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);

View File

@ -34,7 +34,8 @@ VectorValues optimize(const GaussianBayesTree& bayesTree);
void optimizeInPlace(const GaussianBayesTree& clique, VectorValues& result);
namespace internal {
void optimizeInPlace(const boost::shared_ptr<BayesTreeClique<GaussianConditional> >& clique, VectorValues& result);
template<class BAYESTREE>
void optimizeInPlace(const typename BAYESTREE::sharedClique& clique, VectorValues& result);
}
/**

View File

@ -27,42 +27,6 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
// Helper function used only in this file - extracts vectors with variable indices
// in the first and last iterators, and concatenates them in that order into the
// output.
template<class VALUES, typename ITERATOR>
static Vector extractVectorValuesSlices(const VALUES& values, ITERATOR first, ITERATOR last) {
// Find total dimensionality
int dim = 0;
for(ITERATOR j = first; j != last; ++j)
dim += values[*j].rows();
// Copy vectors
Vector ret(dim);
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
ret.segment(varStart, values[*j].rows()) = values[*j];
varStart += values[*j].rows();
}
return ret;
}
/* ************************************************************************* */
// Helper function used only in this file - writes to the variables in values
// with indices iterated over by first and last, interpreting vector as the
// concatenated vectors to write.
template<class VECTOR, class VALUES, typename ITERATOR>
static void writeVectorValuesSlices(const VECTOR& vector, VALUES& values, ITERATOR first, ITERATOR last) {
// Copy vectors
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
values[*j] = vector.segment(varStart, values[*j].rows());
varStart += values[*j].rows();
}
assert(varStart == vector.rows());
}
/* ************************************************************************* */
GaussianConditional::GaussianConditional() : rsd_(matrix_) {}
@ -230,7 +194,7 @@ inline static void doSolveInPlace(const GaussianConditional& conditional, VALUES
static const bool debug = false;
if(debug) conditional.print("Solving conditional in place");
Vector xS = extractVectorValuesSlices(x, conditional.beginParents(), conditional.endParents());
Vector xS = internal::extractVectorValuesSlices(x, conditional.beginParents(), conditional.endParents());
xS = conditional.get_d() - conditional.get_S() * xS;
Vector soln = conditional.permutation().transpose() *
conditional.get_R().triangularView<Eigen::Upper>().solve(xS);
@ -238,7 +202,7 @@ inline static void doSolveInPlace(const GaussianConditional& conditional, VALUES
gtsam::print(Matrix(conditional.get_R()), "Calling backSubstituteUpper on ");
gtsam::print(soln, "full back-substitution solution: ");
}
writeVectorValuesSlices(soln, x, conditional.beginFrontals(), conditional.endFrontals());
internal::writeVectorValuesSlices(soln, x, conditional.beginFrontals(), conditional.endFrontals());
}
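// In effect doSolveInPlace performs one back-substitution step of R x_F = d - S x_S: the separator
// values x_S are read from x (assumed already solved), the frontal block is obtained by a triangular
// solve with R (undoing the column permutation), and the result is written back into x.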
/* ************************************************************************* */
@ -253,7 +217,7 @@ void GaussianConditional::solveInPlace(Permuted<VectorValues>& x) const {
/* ************************************************************************* */
void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const {
Vector frontalVec = extractVectorValuesSlices(gy, beginFrontals(), endFrontals());
Vector frontalVec = internal::extractVectorValuesSlices(gy, beginFrontals(), endFrontals());
// TODO: verify permutation
frontalVec = permutation_ * gtsam::backSubstituteUpper(frontalVec,Matrix(get_R()));
GaussianConditional::const_iterator it;
@ -261,14 +225,14 @@ void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const {
const Index i = *it;
transposeMultiplyAdd(-1.0,get_S(it),frontalVec,gy[i]);
}
writeVectorValuesSlices(frontalVec, gy, beginFrontals(), endFrontals());
internal::writeVectorValuesSlices(frontalVec, gy, beginFrontals(), endFrontals());
}
/* ************************************************************************* */
void GaussianConditional::scaleFrontalsBySigma(VectorValues& gy) const {
Vector frontalVec = extractVectorValuesSlices(gy, beginFrontals(), endFrontals());
Vector frontalVec = internal::extractVectorValuesSlices(gy, beginFrontals(), endFrontals());
frontalVec = emul(frontalVec, get_sigmas());
writeVectorValuesSlices(frontalVec, gy, beginFrontals(), endFrontals());
internal::writeVectorValuesSlices(frontalVec, gy, beginFrontals(), endFrontals());
}
}

View File

@ -31,7 +31,7 @@
namespace gtsam {
struct SharedDiagonal;
class SharedDiagonal;
/** return A*x-b
* \todo Make this a member function - affects SubgraphPreconditioner */

View File

@ -50,7 +50,7 @@ namespace gtsam {
// back-substitution
tic(3, "back-substitute");
internal::optimizeInPlace(rootClique, result);
internal::optimizeInPlace<GaussianBayesTree>(rootClique, result);
toc(3, "back-substitute");
return result;
}

View File

@ -33,7 +33,7 @@ namespace gtsam {
// Forward declarations
class JacobianFactor;
struct SharedDiagonal;
class SharedDiagonal;
class GaussianConditional;
template<class C> class BayesNet;

View File

@ -42,20 +42,20 @@ using namespace boost::lambda;
namespace gtsam {
/* ************************************************************************* */
inline void JacobianFactor::assertInvariants() const {
#ifndef NDEBUG
void JacobianFactor::assertInvariants() const {
#ifndef NDEBUG
GaussianFactor::assertInvariants(); // The base class checks for unique keys
assert((size() == 0 && Ab_.rows() == 0 && Ab_.nBlocks() == 0) || size()+1 == Ab_.nBlocks());
assert(firstNonzeroBlocks_.size() == Ab_.rows());
for(size_t i=0; i<firstNonzeroBlocks_.size(); ++i)
assert(firstNonzeroBlocks_[i] < Ab_.nBlocks());
#endif
// Check for non-finite values
for(size_t i=0; i<Ab_.rows(); ++i)
for(size_t j=0; j<Ab_.cols(); ++j)
if(isnan(matrix_(i,j)))
throw invalid_argument("JacobianFactor contains NaN matrix entries.");
#endif
}
/* ************************************************************************* */

View File

@ -23,7 +23,7 @@
namespace gtsam {
struct SharedDiagonal; // forward declare
class SharedDiagonal; // forward declare
/// All noise models live in the noiseModel namespace
namespace noiseModel {

View File

@ -25,7 +25,8 @@ namespace gtsam { // note, deliberately not in noiseModel namespace
// A useful convenience class to refer to a shared Diagonal model
// There are (somewhat dangerous) constructors from Vector and pair<size_t,double>
// that call Sigmas and Sigma, respectively.
struct SharedDiagonal: public noiseModel::Diagonal::shared_ptr {
class SharedDiagonal: public noiseModel::Diagonal::shared_ptr {
public:
SharedDiagonal() {
}
SharedDiagonal(const noiseModel::Diagonal::shared_ptr& p) :

View File

@ -27,7 +27,8 @@ namespace gtsam { // note, deliberately not in noiseModel namespace
* A useful convenience class to refer to a shared Gaussian model
* Also needed to make noise models in matlab
*/
struct SharedGaussian: public SharedNoiseModel {
class SharedGaussian: public SharedNoiseModel {
public:
typedef SharedNoiseModel Base;

View File

@ -156,3 +156,22 @@ void VectorValues::operator+=(const VectorValues& c) {
assert(this->hasSameStructure(c));
this->values_ += c.values_;
}
/* ************************************************************************* */
VectorValues& VectorValues::operator=(const Permuted<VectorValues>& rhs) {
if(this->size() != rhs.size())
throw std::invalid_argument("VectorValues assignment from Permuted<VectorValues> requires pre-allocation, see documentation.");
for(size_t j=0; j<this->size(); ++j) {
if(exists(j)) {
SubVector& l(this->at(j));
const SubVector& r(rhs[j]);
if(l.rows() != r.rows())
throw std::invalid_argument("VectorValues assignment from Permuted<VectorValues> requires pre-allocation, see documentation.");
l = r;
} else {
if(rhs.container().exists(rhs.permutation()[j]))
throw std::invalid_argument("VectorValues assignment from Permuted<VectorValues> requires pre-allocation, see documentation.");
}
}
return *this;
}
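// Note: the loop above copies rhs[j] (the entry behind the permutation) into slot j of *this for
// every existing j, so the assignee must be pre-allocated with dimensions that match the permuted view.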

View File

@ -19,6 +19,7 @@
#include <gtsam/base/Vector.h>
#include <gtsam/base/types.h>
#include <gtsam/inference/Permutation.h>
#include <boost/foreach.hpp>
#include <boost/shared_ptr.hpp>
@ -177,7 +178,7 @@ namespace gtsam {
/** Construct from a container of variable dimensions (in variable order), without initializing any values. */
template<class CONTAINER>
VectorValues(const CONTAINER& dimensions) { append(dimensions); }
explicit VectorValues(const CONTAINER& dimensions) { append(dimensions); }
/** Construct to hold nVars vectors of varDim dimension each. */
VectorValues(Index nVars, size_t varDim) { resize(nVars, varDim); }
@ -273,6 +274,11 @@ namespace gtsam {
*/
void operator+=(const VectorValues& c);
/** Assignment operator from Permuted<VectorValues>; requires the assignee
* to already be pre-allocated with the proper dimensions.
*/
VectorValues& operator=(const Permuted<VectorValues>& rhs);
/// @}
private:
@ -402,4 +408,42 @@ namespace gtsam {
#endif
}
namespace internal {
/* ************************************************************************* */
// Helper function, extracts vectors with variable indices
// in the first and last iterators, and concatenates them in that order into the
// output.
template<class VALUES, typename ITERATOR>
Vector extractVectorValuesSlices(const VALUES& values, ITERATOR first, ITERATOR last) {
// Find total dimensionality
int dim = 0;
for(ITERATOR j = first; j != last; ++j)
dim += values[*j].rows();
// Copy vectors
Vector ret(dim);
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
ret.segment(varStart, values[*j].rows()) = values[*j];
varStart += values[*j].rows();
}
return ret;
}
/* ************************************************************************* */
// Helper function, writes to the variables in values
// with indices iterated over by first and last, interpreting vector as the
// concatenated vectors to write.
template<class VECTOR, class VALUES, typename ITERATOR>
void writeVectorValuesSlices(const VECTOR& vector, VALUES& values, ITERATOR first, ITERATOR last) {
// Copy vectors
int varStart = 0;
for(ITERATOR j = first; j != last; ++j) {
values[*j] = vector.segment(varStart, values[*j].rows());
varStart += values[*j].rows();
}
assert(varStart == vector.rows());
}
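// Worked example (illustrative): if values[0] = [1 2] and values[1] = [3], then iterating over the
// indices {0, 1} extractVectorValuesSlices returns the concatenation [1 2 3], and
// writeVectorValuesSlices is its inverse, scattering a concatenated vector back into those slots.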
}
} // \namespace gtsam

View File

@ -12,13 +12,19 @@ set(nonlinear_local_libs
ccolamd
)
# Files to exclude from compilation of tests and timing scripts
set(nonlinear_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(nonlinear "${nonlinear_local_libs}")
gtsam_add_subdir_tests(nonlinear "${nonlinear_local_libs}" "gtsam-static" "${nonlinear_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(nonlinear "${nonlinear_local_libs}")
gtsam_add_subdir_timing(nonlinear "${nonlinear_local_libs}" "gtsam-static" "${nonlinear_excluded_files}")
endif(GTSAM_BUILD_TIMING)

View File

@ -135,12 +135,18 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
const F& f, const VALUES& x0, const Ordering& ordering, const double f_error, const bool verbose) {
// Compute steepest descent and Newton's method points
tic(0, "Steepest Descent");
VectorValues dx_u = optimizeGradientSearch(Rd);
toc(0, "Steepest Descent");
tic(1, "optimize");
VectorValues dx_n = optimize(Rd);
toc(1, "optimize");
tic(0, "optimizeGradientSearch");
tic(0, "allocateVectorValues");
VectorValues dx_u = *allocateVectorValues(Rd);
toc(0, "allocateVectorValues");
tic(1, "optimizeGradientSearchInPlace");
optimizeGradientSearchInPlace(Rd, dx_u);
toc(1, "optimizeGradientSearchInPlace");
toc(0, "optimizeGradientSearch");
tic(1, "optimizeInPlace");
VectorValues dx_n(VectorValues::SameStructure(dx_u));
optimizeInPlace(Rd, dx_n);
toc(1, "optimizeInPlace");
tic(2, "jfg error");
const GaussianFactorGraph jfg(Rd);
const double M_error = jfg.error(VectorValues::Zero(dx_u));
@ -177,6 +183,7 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "f error: " << f_error << " -> " << result.f_error << endl;
if(verbose) cout << "M error: " << M_error << " -> " << new_M_error << endl;
tic(7, "adjust Delta");
// Compute gain ratio. Here we take advantage of the invariant that the
// Bayes' net error at zero is equal to the nonlinear error
const double rho = fabs(M_error - new_M_error) < 1e-15 ?
@ -186,7 +193,6 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "rho = " << rho << endl;
if(rho >= 0.75) {
tic(7, "Rho >= 0.75");
// M agrees very well with f, so try to increase lambda
const double dx_d_norm = result.dx_d.vector().norm();
const double newDelta = std::max(Delta, 3.0 * dx_d_norm); // Compute new Delta
@ -204,14 +210,12 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
assert(false); }
Delta = newDelta; // Update Delta from new Delta
toc(7, "Rho >= 0.75");
} else if(0.75 > rho && rho >= 0.25) {
// M agrees so-so with f, keep the same Delta
stay = false;
} else if(0.25 > rho && rho >= 0.0) {
tic(8, "0.25 > Rho >= 0");
// M does not agree well with f, decrease Delta until it does
double newDelta;
if(Delta > 1e-5)
@ -227,11 +231,8 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
assert(false); }
Delta = newDelta; // Update Delta from new Delta
toc(8, "0.25 > Rho >= 0");
}
else {
tic(9, "Rho < 0");
} else {
// f actually increased, so keep decreasing Delta until f does not decrease
assert(0.0 > rho);
if(Delta > 1e-5) {
@ -242,8 +243,8 @@ typename DoglegOptimizerImpl::IterationResult DoglegOptimizerImpl::Iterate(
if(verbose) cout << "Warning: Dog leg stopping because cannot decrease error with minimum Delta" << endl;
stay = false;
}
toc(9, "Rho < 0");
}
toc(7, "adjust Delta");
}
// dx_d and f_error have already been filled in during the loop
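// Trust-region recap of the branches above: rho is the dogleg gain ratio (actual nonlinear error
// reduction over the reduction predicted by the linear model M). rho >= 0.75 grows Delta,
// 0.25 <= rho < 0.75 keeps it unchanged, and smaller rho shrinks it.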

View File

@ -1,176 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GaussianISAM2
* @brief Full non-linear ISAM
* @author Michael Kaess
*/
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>
#include <boost/bind.hpp>
namespace gtsam {
using namespace std;
/* ************************************************************************* */
namespace internal {
template<class CLIQUE>
void optimizeWildfire(const boost::shared_ptr<CLIQUE>& clique, double threshold,
vector<bool>& changed, const vector<bool>& replaced, Permuted<VectorValues>& delta, int& count) {
// if none of the variables in this clique (frontal and separator!) changed
// significantly, then by the running intersection property, none of the
// cliques in the children need to be processed
// Are any clique variables part of the tree that has been redone?
bool cliqueReplaced = replaced[(*clique)->frontals().front()];
#ifndef NDEBUG
BOOST_FOREACH(Index frontal, (*clique)->frontals()) {
assert(cliqueReplaced == replaced[frontal]);
}
#endif
// If not redone, then has one of the separator variables changed significantly?
bool recalculate = cliqueReplaced;
if(!recalculate) {
BOOST_FOREACH(Index parent, (*clique)->parents()) {
if(changed[parent]) {
recalculate = true;
break;
}
}
}
// Solve clique if it was replaced, or if any parents were changed above the
// threshold or themselves replaced.
if(recalculate) {
// Temporary copy of the original values, to check how much they change
vector<Vector> originalValues((*clique)->nrFrontals());
GaussianConditional::const_iterator it;
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
originalValues[it - (*clique)->beginFrontals()] = delta[*it];
}
// Back-substitute
(*clique)->solveInPlace(delta);
count += (*clique)->nrFrontals();
// Whether the values changed above a threshold, or always true if the
// clique was replaced.
bool valuesChanged = cliqueReplaced;
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
if(!valuesChanged) {
const Vector& oldValue(originalValues[it - (*clique)->beginFrontals()]);
const SubVector& newValue(delta[*it]);
if((oldValue - newValue).lpNorm<Eigen::Infinity>() >= threshold) {
valuesChanged = true;
break;
}
} else
break;
}
// If the values were above the threshold or this clique was replaced
if(valuesChanged) {
// Set changed flag for each frontal variable and leave the new values
BOOST_FOREACH(Index frontal, (*clique)->frontals()) {
changed[frontal] = true;
}
} else {
// Replace with the old values
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
delta[*it] = originalValues[it - (*clique)->beginFrontals()];
}
}
// Recurse to children
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
optimizeWildfire(child, threshold, changed, replaced, delta, count);
}
}
}
}
/* ************************************************************************* */
template<class CLIQUE>
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root, double threshold, const vector<bool>& keys, Permuted<VectorValues>& delta) {
vector<bool> changed(keys.size(), false);
int count = 0;
// starting from the root, call optimize on each conditional
if(root)
internal::optimizeWildfire(root, threshold, changed, keys, delta, count);
return count;
}
/* ************************************************************************* */
template<class GRAPH>
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam) {
tic(0, "Allocate VectorValues");
VectorValues grad = *allocateVectorValues(isam);
toc(0, "Allocate VectorValues");
optimizeGradientSearchInPlace(isam, grad);
return grad;
}
/* ************************************************************************* */
template<class GRAPH>
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& Rd, VectorValues& grad) {
tic(1, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(Rd, grad);
double gradientSqNorm = grad.dot(grad);
toc(1, "Compute Gradient");
tic(2, "Compute R*g");
// Compute R * g
FactorGraph<JacobianFactor> Rd_jfg(Rd);
Errors Rg = Rd_jfg * grad;
toc(2, "Compute R*g");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
scal(step, grad);
toc(4, "Compute point");
}
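// Step recap: with g the gradient at zero and Rg = R*g computed above, the minimizing step size is
// alpha = -(g^T g) / ((Rg)^T (Rg)), so the steepest-descent point returned in-place is
// delta_x = alpha * g (applied via scal(step, grad)).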
/* ************************************************************************* */
template<class CLIQUE>
void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
int dimR = (*clique)->dim();
int dimSep = (*clique)->get_S().cols() - dimR;
result += ((dimR+1)*dimR)/2 + dimSep*dimR;
// traverse the children
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
nnz_internal(child, result);
}
}
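// Counting note: each clique contributes the upper-triangular R block over its dimR frontal
// dimensions, dimR*(dimR+1)/2 entries, plus its dense dimR x dimSep off-diagonal S block.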
/* ************************************************************************* */
template<class CLIQUE>
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique) {
int result = 0;
// starting from the root, add up entries of frontal and conditional matrices of each conditional
nnz_internal(clique, result);
return result;
}
} /// namespace gtsam

View File

@ -1,60 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GaussianISAM2
* @brief Full non-linear ISAM
* @author Michael Kaess
*/
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/nonlinear/GaussianISAM2.h>
using namespace std;
using namespace gtsam;
#include <boost/bind.hpp>
namespace gtsam {
/* ************************************************************************* */
VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
}
/* ************************************************************************* */
static void gradientAtZeroTreeAdder(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& root, VectorValues& g) {
// Loop through variables in each clique, adding contributions
int variablePosition = 0;
for(GaussianConditional::const_iterator jit = root->conditional()->begin(); jit != root->conditional()->end(); ++jit) {
const int dim = root->conditional()->dim(jit);
g[*jit] += root->gradientContribution().segment(variablePosition, dim);
variablePosition += dim;
}
// Recursively add contributions from children
typedef boost::shared_ptr<ISAM2Clique<GaussianConditional> > sharedClique;
BOOST_FOREACH(const sharedClique& child, root->children()) {
gradientAtZeroTreeAdder(child, g);
}
}
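// Note: summed over all cliques, these per-clique contributions yield the gradient about zero,
// -R^T d; a variable that appears in several cliques accumulates a contribution from each of them.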
/* ************************************************************************* */
void gradientAtZero(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, VectorValues& g) {
// Zero-out gradient
g.setZero();
// Sum up contributions for each clique
gradientAtZeroTreeAdder(bayesTree.root(), g);
}
}

View File

@ -1,153 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file GaussianISAM
* @brief Full non-linear ISAM.
* @author Michael Kaess
*/
// \callgraph
#pragma once
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/nonlinear/ISAM2.h>
namespace gtsam {
/**
* @ingroup ISAM2
* @brief The main ISAM2 class that is exposed to gtsam users, see ISAM2 for usage.
*
* This is a thin wrapper around an ISAM2 class templated on
* GaussianConditional, and the values on which that GaussianISAM2 is
* templated.
*
* @tparam VALUES The Values or TupleValues\Emph{N} that contains the
* variables.
* @tparam GRAPH The NonlinearFactorGraph structure to store factors. Defaults to standard NonlinearFactorGraph<VALUES>
*/
template <class GRAPH = NonlinearFactorGraph>
class GaussianISAM2 : public ISAM2<GaussianConditional, GRAPH> {
typedef ISAM2<GaussianConditional, GRAPH> Base;
public:
/// @name Standard Constructors
/// @{
/** Create an empty ISAM2 instance */
GaussianISAM2(const ISAM2Params& params) : ISAM2<GaussianConditional, GRAPH>(params) {}
/** Create an empty ISAM2 instance using the default set of parameters (see ISAM2Params) */
GaussianISAM2() : ISAM2<GaussianConditional, GRAPH>() {}
/// @}
/// @name Advanced Interface
/// @{
void cloneTo(boost::shared_ptr<GaussianISAM2>& newGaussianISAM2) const {
boost::shared_ptr<Base> isam2 = boost::static_pointer_cast<Base>(newGaussianISAM2);
Base::cloneTo(isam2);
}
/// @}
};
/** Get the linear delta for the ISAM2 object, i.e. the delta returned by ISAM2::getDelta() with its permutation undone */
template<class GRAPH>
VectorValues optimize(const ISAM2<GaussianConditional, GRAPH>& isam) {
VectorValues delta = *allocateVectorValues(isam);
internal::optimizeInPlace(isam.root(), delta);
return delta;
}
/// Optimize the BayesTree, starting from the root.
/// @param replaced Needs to contain
/// all variables that are contained in the top of the Bayes tree that has been
/// redone.
/// @param delta The current solution, an offset from the linearization
/// point.
/// @param threshold The maximum change against the PREVIOUS delta for
/// non-replaced variables that can be ignored, i.e., the old delta entry is kept
/// and recursive backsubstitution might eventually stop if none of the changed
/// variables are contained in the subtree.
/// @return The number of variables that were solved for
template<class CLIQUE>
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root,
double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
/**
* Optimize along the gradient direction, with a closed-form computation to
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
* problem. The error function of a GaussianBayesNet is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the
* gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
*
* \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Optimizing by setting the derivative to zero yields
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
*/
template<class GRAPH>
VectorValues optimizeGradientSearch(const ISAM2<GaussianConditional, GRAPH>& isam);
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
template<class GRAPH>
void optimizeGradientSearchInPlace(const ISAM2<GaussianConditional, GRAPH>& isam, VectorValues& grad);
/// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
template<class CLIQUE>
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around \f$ x = x_0 \f$.
* The gradient is \f$ R^T(Rx-d) \f$.
* This specialized version is used with ISAM2, where each clique stores its
* gradient contribution.
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues
*/
VectorValues gradient(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, const VectorValues& x0);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around zero.
* The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
* This specialized version is used with ISAM2, where each clique stores its
* gradient contribution.
* @param bayesTree The Gaussian Bayes Tree $(R,d)$
* @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
* @return The gradient as a VectorValues
*/
void gradientAtZero(const BayesTree<GaussianConditional, ISAM2Clique<GaussianConditional> >& bayesTree, VectorValues& g);
}/// namespace gtsam
#include <gtsam/nonlinear/GaussianISAM2-inl.h>

View File

@ -10,128 +10,23 @@
* -------------------------------------------------------------------------- */
/**
* @file ISAM2-impl-inl.h
* @file ISAM2-impl.cpp
* @brief Incremental update functionality (ISAM2) for BayesTree, with fluid relinearization.
* @author Michael Kaess, Richard Roberts
*/
#include <gtsam/linear/GaussianBayesTree.h>
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm.hpp>
#include <gtsam/nonlinear/ISAM2-impl.h>
#include <gtsam/base/debug.h>
namespace gtsam {
using namespace std;
template<class CONDITIONAL, class GRAPH>
struct ISAM2<CONDITIONAL, GRAPH>::Impl {
typedef ISAM2<CONDITIONAL, GRAPH> ISAM2Type;
struct PartialSolveResult {
typename ISAM2Type::sharedClique bayesTree;
Permutation fullReordering;
Permutation fullReorderingInverse;
};
struct ReorderingMode {
size_t nFullSystemVars;
enum { /*AS_ADDED,*/ COLAMD } algorithm;
enum { NO_CONSTRAINT, CONSTRAIN_LAST } constrain;
boost::optional<const FastSet<Index>&> constrainedKeys;
};
/**
* Add new variables to the ISAM2 system.
* @param newTheta Initial values for new variables
* @param theta Current solution to be augmented with new initialization
* @param delta Current linear delta to be augmented with zeros
* @param ordering Current ordering to be augmented with new variables
* @param nodes Current BayesTree::Nodes index to be augmented with slots for new variables
* @param keyFormatter Formatter for printing nonlinear keys during debugging
*/
static void AddVariables(const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, vector<bool>& replacedKeys,
Ordering& ordering, typename Base::Nodes& nodes, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
/**
* Extract the set of variable indices from a NonlinearFactorGraph. For each Symbol
* in each NonlinearFactor, obtains the index by calling ordering[symbol].
* @param ordering The current ordering from which to obtain the variable indices
* @param factors The factors from which to extract the variables
* @return The set of variables indices from the factors
*/
static FastSet<Index> IndicesFromFactors(const Ordering& ordering, const GRAPH& factors);
/**
* Find the set of variables to be relinearized according to relinearizeThreshold.
* Any variables in the VectorValues delta whose vector magnitude is greater than
* or equal to relinearizeThreshold are returned.
* @param delta The linear delta to check against the threshold
* @param keyFormatter Formatter for printing nonlinear keys during debugging
* @return The set of variable indices in delta whose magnitude is greater than or
* equal to relinearizeThreshold
*/
static FastSet<Index> CheckRelinearization(const Permuted<VectorValues>& delta, const Ordering& ordering,
const ISAM2Params::RelinearizationThreshold& relinearizeThreshold, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
/**
* Recursively search this clique and its children for marked keys appearing
* in the separator, and add the *frontal* keys of any cliques whose
* separator contains any marked keys to the set \c keys. The purpose of
* this is to discover the cliques that need to be redone due to information
* propagating to them from cliques that directly contain factors being
* relinearized.
*
* The original comment says this finds all variables directly connected to
* the marked ones by measurements. Is this true? It seems like this
* would also pull in variables indirectly connected through other frontal or
* separator variables?
*
* Alternatively could we trace up towards the root for each variable here?
*/
static void FindAll(typename ISAM2Clique<CONDITIONAL>::shared_ptr clique, FastSet<Index>& keys, const vector<bool>& markedMask);
/**
* Apply expmap to the given values, but only for indices appearing in
* \c markedRelinMask. Values are expmapped in-place.
* \param [in][out] values The value to expmap in-place
* \param delta The linear delta with which to expmap
* \param ordering The ordering
* \param mask Mask on linear indices, only \c true entries are expmapped
* \param invalidateIfDebug If this is true, *and* NDEBUG is not defined,
* expmapped deltas will be set to an invalid value (infinity) to catch bugs
* where we might expmap something twice, or expmap it but then not
* recalculate its delta.
* @param keyFormatter Formatter for printing nonlinear keys during debugging
*/
static void ExpmapMasked(Values& values, const Permuted<VectorValues>& delta,
const Ordering& ordering, const std::vector<bool>& mask,
boost::optional<Permuted<VectorValues>&> invalidateIfDebug = boost::optional<Permuted<VectorValues>&>(),
const KeyFormatter& keyFormatter = DefaultKeyFormatter);
/**
* Reorder and eliminate factors. These factors form a subset of the full
* problem, so along with the BayesTree we get a partial reordering of the
* problem that needs to be applied to the other data in ISAM2, which is the
* VariableIndex, the delta, the ordering, and the orphans (including cached
* factors).
* \param factors The factors to eliminate, representing part of the full
* problem. This is permuted during use and so is cleared when this function
* returns in order to invalidate it.
* \param keys The set of indices used in \c factors.
* \return The eliminated BayesTree and the permutation to be applied to the
* rest of the ISAM2 data.
*/
static PartialSolveResult PartialSolve(GaussianFactorGraph& factors, const FastSet<Index>& keys,
const ReorderingMode& reorderingMode);
static size_t UpdateDelta(const boost::shared_ptr<ISAM2Clique<CONDITIONAL> >& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold);
};
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
void ISAM2<CONDITIONAL,GRAPH>::Impl::AddVariables(
const Values& newTheta, Values& theta, Permuted<VectorValues>& delta, vector<bool>& replacedKeys,
Ordering& ordering,typename Base::Nodes& nodes, const KeyFormatter& keyFormatter) {
void ISAM2::Impl::AddVariables(
const Values& newTheta, Values& theta, Permuted<VectorValues>& delta,
Permuted<VectorValues>& deltaNewton, Permuted<VectorValues>& deltaGradSearch, vector<bool>& replacedKeys,
Ordering& ordering, Base::Nodes& nodes, const KeyFormatter& keyFormatter) {
const bool debug = ISDEBUG("ISAM2 AddVariables");
theta.insert(newTheta);
@ -145,10 +40,18 @@ void ISAM2<CONDITIONAL,GRAPH>::Impl::AddVariables(
delta.container().append(dims);
delta.container().vector().segment(originalDim, newDim).operator=(Vector::Zero(newDim));
delta.permutation().resize(originalnVars + newTheta.size());
deltaNewton.container().append(dims);
deltaNewton.container().vector().segment(originalDim, newDim).operator=(Vector::Zero(newDim));
deltaNewton.permutation().resize(originalnVars + newTheta.size());
deltaGradSearch.container().append(dims);
deltaGradSearch.container().vector().segment(originalDim, newDim).operator=(Vector::Zero(newDim));
deltaGradSearch.permutation().resize(originalnVars + newTheta.size());
{
Index nextVar = originalnVars;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
delta.permutation()[nextVar] = nextVar;
deltaNewton.permutation()[nextVar] = nextVar;
deltaGradSearch.permutation()[nextVar] = nextVar;
ordering.insert(key_value.key, nextVar);
if(debug) cout << "Adding variable " << keyFormatter(key_value.key) << " with order " << nextVar << endl;
++ nextVar;
@ -163,10 +66,9 @@ void ISAM2<CONDITIONAL,GRAPH>::Impl::AddVariables(
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
FastSet<Index> ISAM2<CONDITIONAL,GRAPH>::Impl::IndicesFromFactors(const Ordering& ordering, const GRAPH& factors) {
FastSet<Index> ISAM2::Impl::IndicesFromFactors(const Ordering& ordering, const NonlinearFactorGraph& factors) {
FastSet<Index> indices;
BOOST_FOREACH(const typename NonlinearFactor::shared_ptr& factor, factors) {
BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, factors) {
BOOST_FOREACH(Key key, factor->keys()) {
indices.insert(ordering[key]);
}
@ -175,8 +77,7 @@ FastSet<Index> ISAM2<CONDITIONAL,GRAPH>::Impl::IndicesFromFactors(const Ordering
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
FastSet<Index> ISAM2<CONDITIONAL,GRAPH>::Impl::CheckRelinearization(const Permuted<VectorValues>& delta, const Ordering& ordering,
FastSet<Index> ISAM2::Impl::CheckRelinearization(const Permuted<VectorValues>& delta, const Ordering& ordering,
const ISAM2Params::RelinearizationThreshold& relinearizeThreshold, const KeyFormatter& keyFormatter) {
FastSet<Index> relinKeys;
@ -204,8 +105,7 @@ FastSet<Index> ISAM2<CONDITIONAL,GRAPH>::Impl::CheckRelinearization(const Permut
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
void ISAM2<CONDITIONAL,GRAPH>::Impl::FindAll(typename ISAM2Clique<CONDITIONAL>::shared_ptr clique, FastSet<Index>& keys, const vector<bool>& markedMask) {
void ISAM2::Impl::FindAll(ISAM2Clique::shared_ptr clique, FastSet<Index>& keys, const vector<bool>& markedMask) {
static const bool debug = false;
// does the separator contain any of the variables?
bool found = false;
@ -219,14 +119,13 @@ void ISAM2<CONDITIONAL,GRAPH>::Impl::FindAll(typename ISAM2Clique<CONDITIONAL>::
if(debug) clique->print("Key(s) marked in clique ");
if(debug) cout << "so marking key " << (*clique)->keys().front() << endl;
}
BOOST_FOREACH(const typename ISAM2Clique<CONDITIONAL>::shared_ptr& child, clique->children_) {
BOOST_FOREACH(const ISAM2Clique::shared_ptr& child, clique->children_) {
FindAll(child, keys, markedMask);
}
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
void ISAM2<CONDITIONAL, GRAPH>::Impl::ExpmapMasked(Values& values, const Permuted<VectorValues>& delta, const Ordering& ordering,
void ISAM2::Impl::ExpmapMasked(Values& values, const Permuted<VectorValues>& delta, const Ordering& ordering,
const vector<bool>& mask, boost::optional<Permuted<VectorValues>&> invalidateIfDebug, const KeyFormatter& keyFormatter) {
// If debugging, invalidate if requested, otherwise do not invalidate.
// Invalidating means setting expmapped entries to Inf, to trigger assertions
@ -262,12 +161,11 @@ void ISAM2<CONDITIONAL, GRAPH>::Impl::ExpmapMasked(Values& values, const Permute
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
typename ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolveResult
ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
const FastSet<Index>& keys, const ReorderingMode& reorderingMode) {
ISAM2::Impl::PartialSolveResult
ISAM2::Impl::PartialSolve(GaussianFactorGraph& factors,
const FastSet<Index>& keys, const ReorderingMode& reorderingMode, bool useQR) {
static const bool debug = ISDEBUG("ISAM2 recalculate");
const bool debug = ISDEBUG("ISAM2 recalculate");
PartialSolveResult result;
@ -304,9 +202,15 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
vector<int> cmember(affectedKeysSelector.size(), 0);
if(reorderingMode.constrain == ReorderingMode::CONSTRAIN_LAST) {
assert(reorderingMode.constrainedKeys);
if(keys.size() > reorderingMode.constrainedKeys->size()) {
BOOST_FOREACH(Index var, *reorderingMode.constrainedKeys) {
cmember[affectedKeysSelectorInverse[var]] = 1;
if(!reorderingMode.constrainedKeys->empty()) {
typedef std::pair<const Index,int> Index_Group;
if(keys.size() > reorderingMode.constrainedKeys->size()) { // Only if some variables are unconstrained
BOOST_FOREACH(const Index_Group& index_group, *reorderingMode.constrainedKeys) {
cmember[affectedKeysSelectorInverse[index_group.first]] = index_group.second; }
} else {
int minGroup = *boost::range::min_element(boost::adaptors::values(*reorderingMode.constrainedKeys));
BOOST_FOREACH(const Index_Group& index_group, *reorderingMode.constrainedKeys) {
cmember[affectedKeysSelectorInverse[index_group.first]] = index_group.second - minGroup; }
}
}
}
@ -340,14 +244,11 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
// eliminate into a Bayes net
tic(7,"eliminate");
JunctionTree<GaussianFactorGraph, typename ISAM2Type::Clique> jt(factors, affectedFactorsIndex);
result.bayesTree = jt.eliminate(EliminatePreferLDL);
if(debug && result.bayesTree) {
if(boost::dynamic_pointer_cast<ISAM2Clique<CONDITIONAL> >(result.bayesTree))
cout << "Is an ISAM2 clique" << endl;
cout << "Re-eliminated BT:\n";
result.bayesTree->printTree("");
}
JunctionTree<GaussianFactorGraph, ISAM2::Clique> jt(factors, affectedFactorsIndex);
if(!useQR)
result.bayesTree = jt.eliminate(EliminatePreferLDL);
else
result.bayesTree = jt.eliminate(EliminateQR);
toc(7,"eliminate");
tic(8,"permute eliminated");
@ -363,19 +264,18 @@ ISAM2<CONDITIONAL, GRAPH>::Impl::PartialSolve(GaussianFactorGraph& factors,
/* ************************************************************************* */
namespace internal {
inline static void optimizeInPlace(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& clique, VectorValues& result) {
inline static void optimizeInPlace(const boost::shared_ptr<ISAM2Clique>& clique, VectorValues& result) {
// parents are assumed to already be solved and available in result
clique->conditional()->solveInPlace(result);
// starting from the root, call optimize on each conditional
BOOST_FOREACH(const boost::shared_ptr<ISAM2Clique<GaussianConditional> >& child, clique->children_)
BOOST_FOREACH(const boost::shared_ptr<ISAM2Clique>& child, clique->children_)
optimizeInPlace(child, result);
}
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
size_t ISAM2<CONDITIONAL,GRAPH>::Impl::UpdateDelta(const boost::shared_ptr<ISAM2Clique<CONDITIONAL> >& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold) {
size_t ISAM2::Impl::UpdateDelta(const boost::shared_ptr<ISAM2Clique>& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold) {
size_t lastBacksubVariableCount;
@ -412,4 +312,76 @@ size_t ISAM2<CONDITIONAL,GRAPH>::Impl::UpdateDelta(const boost::shared_ptr<ISAM2
return lastBacksubVariableCount;
}
/* ************************************************************************* */
namespace internal {
void updateDoglegDeltas(const boost::shared_ptr<ISAM2Clique>& clique, std::vector<bool>& replacedKeys,
const VectorValues& grad, Permuted<VectorValues>& deltaNewton, Permuted<VectorValues>& RgProd, size_t& varsUpdated) {
// Check whether any frontal or separator keys were recalculated; if so, we need
// to update the deltas and recurse to the children, but if not, we do not need
// to recurse further because of the running intersection property.
bool anyReplaced = false;
BOOST_FOREACH(Index j, *clique->conditional()) {
if(replacedKeys[j]) {
anyReplaced = true;
break;
}
}
if(anyReplaced) {
// Update the current variable
// Get VectorValues slice corresponding to current variables
Vector gR = internal::extractVectorValuesSlices(grad, (*clique)->beginFrontals(), (*clique)->endFrontals());
Vector gS = internal::extractVectorValuesSlices(grad, (*clique)->beginParents(), (*clique)->endParents());
// Compute R*g and S*g for this clique
Vector RSgProd = ((*clique)->get_R() * (*clique)->permutation().transpose()) * gR + (*clique)->get_S() * gS;
// Write into RgProd vector
internal::writeVectorValuesSlices(RSgProd, RgProd, (*clique)->beginFrontals(), (*clique)->endFrontals());
// Now solve this clique's part of the Newton's-method point (back-substitution)
//(*clique)->solveInPlace(deltaNewton);
varsUpdated += (*clique)->nrFrontals();
// Recurse to children
BOOST_FOREACH(const ISAM2Clique::shared_ptr& child, clique->children()) {
updateDoglegDeltas(child, replacedKeys, grad, deltaNewton, RgProd, varsUpdated); }
}
}
}
/* ************************************************************************* */
size_t ISAM2::Impl::UpdateDoglegDeltas(const ISAM2& isam, double wildfireThreshold, std::vector<bool>& replacedKeys,
Permuted<VectorValues>& deltaNewton, Permuted<VectorValues>& RgProd) {
// Get gradient
VectorValues grad = *allocateVectorValues(isam);
gradientAtZero(isam, grad);
// Update variables
size_t varsUpdated = 0;
internal::updateDoglegDeltas(isam.root(), replacedKeys, grad, deltaNewton, RgProd, varsUpdated);
optimizeWildfire(isam.root(), wildfireThreshold, replacedKeys, deltaNewton);
#if 0
VectorValues expected = *allocateVectorValues(isam);
internal::optimizeInPlace<ISAM2>(isam.root(), expected);
for(size_t j = 0; j<expected.size(); ++j)
assert(equal_with_abs_tol(expected[j], deltaNewton[j], 1e-2));
FactorGraph<JacobianFactor> Rd_jfg(isam);
Errors Rg = Rd_jfg * grad;
double RgMagExpected = dot(Rg, Rg);
double RgMagActual = RgProd.container().vector().squaredNorm();
cout << fabs(RgMagExpected - RgMagActual) << endl;
assert(fabs(RgMagExpected - RgMagActual) < (1e-8 * RgMagActual + 1e-4));
#endif
replacedKeys.assign(replacedKeys.size(), false);
return varsUpdated;
}
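/* For reference, a sketch (not part of this commit) of how the accumulated
 * R*g products are typically consumed by the dogleg step: under the
 * Gauss-Newton approximation H ~= R'R, the length of the steepest-descent
 * (Cauchy) step along -g is
 *   alpha = (g'g) / (g'R'Rg) = |g|^2 / |Rg|^2,
 * where |Rg|^2 is exactly the quantity the #if 0 check above compares against
 * RgProd.container().vector().squaredNorm(). */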
}

View File

@ -0,0 +1,135 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ISAM2-impl.h
* @brief Incremental update functionality (ISAM2) for BayesTree, with fluid relinearization.
* @author Michael Kaess, Richard Roberts
*/
#pragma once
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/ISAM2.h>
namespace gtsam {
using namespace std;
struct ISAM2::Impl {
struct PartialSolveResult {
ISAM2::sharedClique bayesTree;
Permutation fullReordering;
Permutation fullReorderingInverse;
};
struct ReorderingMode {
size_t nFullSystemVars;
enum { /*AS_ADDED,*/ COLAMD } algorithm;
enum { NO_CONSTRAINT, CONSTRAIN_LAST } constrain;
boost::optional<FastMap<Index,int> > constrainedKeys;
};
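// Illustrative only (mirrors ISAM2::recalculate): to force the most recently
// added variables to be eliminated last, callers give them a higher CCOLAMD
// group than the default group 0, e.g.
//   ReorderingMode mode;
//   mode.constrain = ReorderingMode::CONSTRAIN_LAST;
//   mode.constrainedKeys = FastMap<Index,int>();
//   BOOST_FOREACH(Index var, newKeys) { mode.constrainedKeys->insert(std::make_pair(var, 1)); }
// PartialSolve then translates these groups into the cmember vector handed to
// constrained COLAMD.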
/**
* Add new variables to the ISAM2 system.
* @param newTheta Initial values for new variables
* @param theta Current solution to be augmented with new initialization
* @param delta Current linear delta to be augmented with zeros
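* @param deltaNewton Dogleg Newton-step delta, augmented alongside \c delta for the new variables (description inferred from the call in ISAM2::update)
* @param deltaGradSearch Dogleg gradient-search values (the accumulated R*g products, passed as RgProd_ by ISAM2::update), augmented alongside \c delta
* @param replacedKeys Per-variable replaced-keys mask, extended with entries for the new variables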
* @param ordering Current ordering to be augmented with new variables
* @param nodes Current BayesTree::Nodes index to be augmented with slots for new variables
* @param keyFormatter Formatter for printing nonlinear keys during debugging
*/
static void AddVariables(const Values& newTheta, Values& theta, Permuted<VectorValues>& delta,
Permuted<VectorValues>& deltaNewton, Permuted<VectorValues>& deltaGradSearch, vector<bool>& replacedKeys,
Ordering& ordering, Base::Nodes& nodes, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
/**
* Extract the set of variable indices from a NonlinearFactorGraph. For each Symbol
* in each NonlinearFactor, obtains the index by calling ordering[symbol].
* @param ordering The current ordering from which to obtain the variable indices
* @param factors The factors from which to extract the variables
* @return The set of variable indices from the factors
*/
static FastSet<Index> IndicesFromFactors(const Ordering& ordering, const NonlinearFactorGraph& factors);
/**
* Find the set of variables to be relinearized according to relinearizeThreshold.
* Any variables in the VectorValues delta whose vector magnitude is greater than
* or equal to relinearizeThreshold are returned.
* @param delta The linear delta to check against the threshold
* @param keyFormatter Formatter for printing nonlinear keys during debugging
* @return The set of variable indices in delta whose magnitude is greater than or
* equal to relinearizeThreshold
*/
static FastSet<Index> CheckRelinearization(const Permuted<VectorValues>& delta, const Ordering& ordering,
const ISAM2Params::RelinearizationThreshold& relinearizeThreshold, const KeyFormatter& keyFormatter = DefaultKeyFormatter);
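/* A minimal sketch of the intended check (assuming a single scalar threshold
 * and the infinity norm as the per-variable magnitude):
 *   FastSet<Index> relinKeys;
 *   for(Index var = 0; var < delta.size(); ++var)
 *     if(delta[var].lpNorm<Eigen::Infinity>() >= threshold)
 *       relinKeys.insert(var);
 */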
/**
* Recursively search this clique and its children for marked keys appearing
* in the separator, and add the *frontal* keys of any cliques whose
* separator contains any marked keys to the set \c keys. The purpose of
* this is to discover the cliques that need to be redone due to information
* propagating to them from cliques that directly contain factors being
* relinearized.
*
* The original comment says this finds all variables directly connected to
* the marked ones by measurements. Is that accurate? It seems this would also
* pull in variables that are only indirectly connected, through other frontal
* or separator variables.
*
* Alternatively, could we trace up towards the root for each variable here?
*/
static void FindAll(ISAM2Clique::shared_ptr clique, FastSet<Index>& keys, const vector<bool>& markedMask);
/**
* Apply expmap to the given values, but only for indices appearing in
* \c markedRelinMask. Values are expmapped in-place.
* \param[in,out] values The values to expmap in-place
* \param delta The linear delta with which to expmap
* \param ordering The ordering
* \param mask Mask on linear indices, only \c true entries are expmapped
* \param invalidateIfDebug If this is provided, *and* NDEBUG is not defined,
* expmapped deltas will be set to an invalid value (infinity) to catch bugs
* where we might expmap something twice, or expmap it but then not
* recalculate its delta.
* @param keyFormatter Formatter for printing nonlinear keys during debugging
*/
static void ExpmapMasked(Values& values, const Permuted<VectorValues>& delta,
const Ordering& ordering, const std::vector<bool>& mask,
boost::optional<Permuted<VectorValues>&> invalidateIfDebug = boost::optional<Permuted<VectorValues>&>(),
const KeyFormatter& keyFormatter = DefaultKeyFormatter);
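/* Usage sketch (mirrors ISAM2::calculateEstimate(), which expmaps every
 * variable by passing an all-true mask):
 *   Values estimate(theta_);
 *   std::vector<bool> mask(ordering_.nVars(), true);
 *   Impl::ExpmapMasked(estimate, getDelta(), ordering_, mask);
 */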
/**
* Reorder and eliminate factors. These factors form a subset of the full
* problem, so along with the BayesTree we get a partial reordering of the
* problem that needs to be applied to the other data in ISAM2, which is the
* VariableIndex, the delta, the ordering, and the orphans (including cached
* factors).
* \param factors The factors to eliminate, representing part of the full
* problem. This is permuted during use and so is cleared when this function
* returns in order to invalidate it.
* \param keys The set of indices used in \c factors.
* \param useQR Whether to use QR (if true), or LDL (if false).
* \return The eliminated BayesTree and the permutation to be applied to the
* rest of the ISAM2 data.
*/
static PartialSolveResult PartialSolve(GaussianFactorGraph& factors, const FastSet<Index>& keys,
const ReorderingMode& reorderingMode, bool useQR);
static size_t UpdateDelta(const boost::shared_ptr<ISAM2Clique>& root, std::vector<bool>& replacedKeys, Permuted<VectorValues>& delta, double wildfireThreshold);
static size_t UpdateDoglegDeltas(const ISAM2& isam, double wildfireThreshold, std::vector<bool>& replacedKeys,
Permuted<VectorValues>& deltaNewton, Permuted<VectorValues>& RgProd);
};
}

View File

@ -1,6 +1,6 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
@ -10,624 +10,144 @@
* -------------------------------------------------------------------------- */
/**
* @file ISAM2-inl.h
* @brief Incremental update functionality (ISAM2) for BayesTree, with fluid relinearization.
* @author Michael Kaess, Richard Roberts
* @file ISAM2-inl.h
* @brief Template method implementations and helper functions (wildfire back-substitution, nnz counting) for ISAM2.
* @author Richard Roberts
* @date Mar 16, 2012
*/
#pragma once
#include <boost/foreach.hpp>
#include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign;
#include <gtsam/base/timing.h>
#include <gtsam/base/debug.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/inference/BayesTree-inl.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/nonlinear/ISAM2-impl-inl.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/JacobianFactor.h>
#include <boost/bind.hpp>
namespace gtsam {
using namespace std;
static const bool disableReordering = false;
static const double batchThreshold = 0.65;
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
ISAM2<CONDITIONAL, GRAPH>::ISAM2(const ISAM2Params& params):
delta_(Permutation(), deltaUnpermuted_), deltaUptodate_(true), params_(params) {
// See note in gtsam/base/boost_variant_with_workaround.h
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
ISAM2<CONDITIONAL, GRAPH>::ISAM2():
delta_(Permutation(), deltaUnpermuted_), deltaUptodate_(true) {
// See note in gtsam/base/boost_variant_with_workaround.h
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
FastList<size_t> ISAM2<CONDITIONAL, GRAPH>::getAffectedFactors(const FastList<Index>& keys) const {
static const bool debug = false;
if(debug) cout << "Getting affected factors for ";
if(debug) { BOOST_FOREACH(const Index key, keys) { cout << key << " "; } }
if(debug) cout << endl;
FactorGraph<NonlinearFactor > allAffected;
FastList<size_t> indices;
BOOST_FOREACH(const Index key, keys) {
// const list<size_t> l = nonlinearFactors_.factors(key);
// indices.insert(indices.begin(), l.begin(), l.end());
const VariableIndex::Factors& factors(variableIndex_[key]);
BOOST_FOREACH(size_t factor, factors) {
if(debug) cout << "Variable " << key << " affects factor " << factor << endl;
indices.push_back(factor);
}
}
indices.sort();
indices.unique();
if(debug) cout << "Affected factors are: ";
if(debug) { BOOST_FOREACH(const size_t index, indices) { cout << index << " "; } }
if(debug) cout << endl;
return indices;
}
/* ************************************************************************* */
// retrieve all factors that ONLY contain the affected variables
// (note that the remaining stuff is summarized in the cached factors)
template<class CONDITIONAL, class GRAPH>
FactorGraph<GaussianFactor>::shared_ptr
ISAM2<CONDITIONAL, GRAPH>::relinearizeAffectedFactors(const FastList<Index>& affectedKeys) const {
tic(1,"getAffectedFactors");
FastList<size_t> candidates = getAffectedFactors(affectedKeys);
toc(1,"getAffectedFactors");
GRAPH nonlinearAffectedFactors;
tic(2,"affectedKeysSet");
// for fast lookup below
FastSet<Index> affectedKeysSet;
affectedKeysSet.insert(affectedKeys.begin(), affectedKeys.end());
toc(2,"affectedKeysSet");
tic(3,"check candidates");
BOOST_FOREACH(size_t idx, candidates) {
bool inside = true;
BOOST_FOREACH(Key key, nonlinearFactors_[idx]->keys()) {
Index var = ordering_[key];
if (affectedKeysSet.find(var) == affectedKeysSet.end()) {
inside = false;
break;
}
}
if (inside)
nonlinearAffectedFactors.push_back(nonlinearFactors_[idx]);
}
toc(3,"check candidates");
tic(4,"linearize");
FactorGraph<GaussianFactor>::shared_ptr linearized(nonlinearAffectedFactors.linearize(theta_, ordering_));
toc(4,"linearize");
return linearized;
}
/* ************************************************************************* */
// find intermediate (linearized) factors from cache that are passed into the affected area
template<class CONDITIONAL, class GRAPH>
FactorGraph<typename ISAM2<CONDITIONAL, GRAPH>::CacheFactor>
ISAM2<CONDITIONAL, GRAPH>::getCachedBoundaryFactors(Cliques& orphans) {
static const bool debug = false;
FactorGraph<CacheFactor> cachedBoundary;
BOOST_FOREACH(sharedClique orphan, orphans) {
// find the last variable that was eliminated
Index key = (*orphan)->frontals().back();
#ifndef NDEBUG
// typename BayesNet<CONDITIONAL>::const_iterator it = orphan->end();
// const CONDITIONAL& lastCONDITIONAL = **(--it);
// typename CONDITIONAL::const_iterator keyit = lastCONDITIONAL.endParents();
// const Index lastKey = *(--keyit);
// assert(key == lastKey);
#endif
// retrieve the cached factor and add to boundary
cachedBoundary.push_back(boost::dynamic_pointer_cast<CacheFactor>(orphan->cachedFactor()));
if(debug) { cout << "Cached factor for variable " << key; orphan->cachedFactor()->print(""); }
}
return cachedBoundary;
}
template<class CONDITIONAL, class GRAPH>
boost::shared_ptr<FastSet<Index> > ISAM2<CONDITIONAL, GRAPH>::recalculate(
const FastSet<Index>& markedKeys, const FastVector<Index>& newKeys, const FactorGraph<GaussianFactor>::shared_ptr newFactors,
const boost::optional<FastSet<Index> >& constrainKeys, ISAM2Result& result) {
// TODO: new factors are linearized twice, the newFactors passed in are not used.
static const bool debug = ISDEBUG("ISAM2 recalculate");
// Input: BayesTree(this), newFactors
//#define PRINT_STATS // figures for paper, disable for timing
#ifdef PRINT_STATS
static int counter = 0;
int maxClique = 0;
double avgClique = 0;
int numCliques = 0;
int nnzR = 0;
if (counter>0) { // cannot call on empty tree
GaussianISAM2_P::CliqueData cdata = this->getCliqueData();
GaussianISAM2_P::CliqueStats cstats = cdata.getStats();
maxClique = cstats.maxCONDITIONALSize;
avgClique = cstats.avgCONDITIONALSize;
numCliques = cdata.conditionalSizes.size();
nnzR = calculate_nnz(this->root());
}
counter++;
#endif
if(debug) {
cout << "markedKeys: ";
BOOST_FOREACH(const Index key, markedKeys) { cout << key << " "; }
cout << endl;
cout << "newKeys: ";
BOOST_FOREACH(const Index key, newKeys) { cout << key << " "; }
cout << endl;
}
// 1. Remove top of Bayes tree and convert to a factor graph:
// (a) For each affected variable, remove the corresponding clique and all parents up to the root.
// (b) Store orphaned sub-trees \BayesTree_{O} of removed cliques.
tic(1, "removetop");
Cliques orphans;
BayesNet<GaussianConditional> affectedBayesNet;
this->removeTop(markedKeys, affectedBayesNet, orphans);
toc(1, "removetop");
if(debug) affectedBayesNet.print("Removed top: ");
if(debug) orphans.print("Orphans: ");
// FactorGraph<GaussianFactor> factors(affectedBayesNet);
// bug was here: we cannot reuse the original factors, because then the cached factors get messed up
// [all the necessary data is actually contained in the affectedBayesNet, including what was passed in from the boundaries,
// so this would be correct; however, in the process we also generate new cached_ entries that will be wrong (ie. they don't
// contain what would be passed up at a certain point if batch elimination was done, but that's what we need); we could choose
// not to update cached_ from here, but then the new information (and potentially different variable ordering) is not reflected
// in the cached_ values which again will be wrong]
// so instead we have to retrieve the original linearized factors AND add the cached factors from the boundary
// BEGIN OF COPIED CODE
// ordering provides all keys in conditionals, there cannot be others because path to root included
tic(2,"affectedKeys");
FastList<Index> affectedKeys = affectedBayesNet.ordering();
toc(2,"affectedKeys");
if(affectedKeys.size() >= theta_.size() * batchThreshold) {
tic(3,"batch");
tic(0,"add keys");
boost::shared_ptr<FastSet<Index> > affectedKeysSet(new FastSet<Index>());
BOOST_FOREACH(const Ordering::value_type& key_index, ordering_) { affectedKeysSet->insert(key_index.second); }
toc(0,"add keys");
tic(1,"reorder");
tic(1,"CCOLAMD");
// Do a batch step - reorder and relinearize all variables
vector<int> cmember(theta_.size(), 0);
FastSet<Index> constrainedKeysSet;
if(constrainKeys)
constrainedKeysSet = *constrainKeys;
else
constrainedKeysSet.insert(newKeys.begin(), newKeys.end());
if(theta_.size() > constrainedKeysSet.size()) {
BOOST_FOREACH(Index var, constrainedKeysSet) { cmember[var] = 1; }
}
Permutation::shared_ptr colamd(inference::PermutationCOLAMD_(variableIndex_, cmember));
Permutation::shared_ptr colamdInverse(colamd->inverse());
toc(1,"CCOLAMD");
// Reorder
tic(2,"permute global variable index");
variableIndex_.permute(*colamd);
toc(2,"permute global variable index");
tic(3,"permute delta");
delta_.permute(*colamd);
toc(3,"permute delta");
tic(4,"permute ordering");
ordering_.permuteWithInverse(*colamdInverse);
toc(4,"permute ordering");
toc(1,"reorder");
tic(2,"linearize");
GaussianFactorGraph factors(*nonlinearFactors_.linearize(theta_, ordering_));
toc(2,"linearize");
tic(5,"eliminate");
JunctionTree<GaussianFactorGraph, typename Base::Clique> jt(factors, variableIndex_);
sharedClique newRoot = jt.eliminate(EliminatePreferLDL);
if(debug) newRoot->print("Eliminated: ");
toc(5,"eliminate");
tic(6,"insert");
this->clear();
this->insert(newRoot);
toc(6,"insert");
toc(3,"batch");
result.variablesReeliminated = affectedKeysSet->size();
lastAffectedMarkedCount = markedKeys.size();
lastAffectedVariableCount = affectedKeysSet->size();
lastAffectedFactorCount = factors.size();
return affectedKeysSet;
} else {
tic(4,"incremental");
// 2. Add the new factors \Factors' into the resulting factor graph
FastList<Index> affectedAndNewKeys;
affectedAndNewKeys.insert(affectedAndNewKeys.end(), affectedKeys.begin(), affectedKeys.end());
affectedAndNewKeys.insert(affectedAndNewKeys.end(), newKeys.begin(), newKeys.end());
tic(1,"relinearizeAffected");
GaussianFactorGraph factors(*relinearizeAffectedFactors(affectedAndNewKeys));
if(debug) factors.print("Relinearized factors: ");
toc(1,"relinearizeAffected");
if(debug) { cout << "Affected keys: "; BOOST_FOREACH(const Index key, affectedKeys) { cout << key << " "; } cout << endl; }
result.variablesReeliminated = affectedAndNewKeys.size();
lastAffectedMarkedCount = markedKeys.size();
lastAffectedVariableCount = affectedKeys.size();
lastAffectedFactorCount = factors.size();
#ifdef PRINT_STATS
// output for generating figures
cout << "linear: #markedKeys: " << markedKeys.size() << " #affectedVariables: " << affectedKeys.size()
<< " #affectedFactors: " << factors.size() << " maxCliqueSize: " << maxClique
<< " avgCliqueSize: " << avgClique << " #Cliques: " << numCliques << " nnzR: " << nnzR << endl;
#endif
tic(2,"cached");
// add the cached intermediate results from the boundary of the orphans ...
FactorGraph<CacheFactor> cachedBoundary = getCachedBoundaryFactors(orphans);
if(debug) cachedBoundary.print("Boundary factors: ");
factors.reserve(factors.size() + cachedBoundary.size());
// Copy so that we can later permute factors
BOOST_FOREACH(const CacheFactor::shared_ptr& cached, cachedBoundary) {
factors.push_back(GaussianFactor::shared_ptr(new CacheFactor(*cached)));
}
toc(2,"cached");
// END OF COPIED CODE
// 3. Re-order and eliminate the factor graph into a Bayes net (Algorithm [alg:eliminate]), and re-assemble into a new Bayes tree (Algorithm [alg:BayesTree])
tic(4,"reorder and eliminate");
tic(1,"list to set");
// create a partial reordering for the new and contaminated factors
// markedKeys are passed in: those variables will be forced to the end in the ordering
boost::shared_ptr<FastSet<Index> > affectedKeysSet(new FastSet<Index>(markedKeys));
affectedKeysSet->insert(affectedKeys.begin(), affectedKeys.end());
toc(1,"list to set");
tic(2,"PartialSolve");
typename Impl::ReorderingMode reorderingMode;
reorderingMode.nFullSystemVars = ordering_.nVars();
reorderingMode.algorithm = Impl::ReorderingMode::COLAMD;
reorderingMode.constrain = Impl::ReorderingMode::CONSTRAIN_LAST;
if(constrainKeys)
reorderingMode.constrainedKeys = *constrainKeys;
else
reorderingMode.constrainedKeys = FastSet<Index>(newKeys.begin(), newKeys.end());
typename Impl::PartialSolveResult partialSolveResult =
Impl::PartialSolve(factors, *affectedKeysSet, reorderingMode);
toc(2,"PartialSolve");
// We now need to permute everything according this partial reordering: the
// delta vector, the global ordering, and the factors we're about to
// re-eliminate. The reordered variables are also mentioned in the
// orphans and the leftover cached factors.
tic(3,"permute global variable index");
variableIndex_.permute(partialSolveResult.fullReordering);
toc(3,"permute global variable index");
tic(4,"permute delta");
delta_.permute(partialSolveResult.fullReordering);
toc(4,"permute delta");
tic(5,"permute ordering");
ordering_.permuteWithInverse(partialSolveResult.fullReorderingInverse);
toc(5,"permute ordering");
toc(4,"reorder and eliminate");
tic(6,"re-assemble");
if(partialSolveResult.bayesTree) {
assert(!this->root_);
this->insert(partialSolveResult.bayesTree);
}
toc(6,"re-assemble");
// 4. Insert the orphans back into the new Bayes tree.
tic(7,"orphans");
tic(1,"permute");
BOOST_FOREACH(sharedClique orphan, orphans) {
(void)orphan->permuteSeparatorWithInverse(partialSolveResult.fullReorderingInverse);
}
toc(1,"permute");
tic(2,"insert");
// add orphans to the bottom of the new tree
BOOST_FOREACH(sharedClique orphan, orphans) {
// Because the affectedKeysSelector is sorted, the orphan separator keys
// will be sorted correctly according to the new elimination order after
// applying the permutation, so findParentClique, which looks for the
// lowest-ordered parent, will still work.
Index parentRepresentative = Base::findParentClique((*orphan)->parents());
sharedClique parent = (*this)[parentRepresentative];
parent->children_ += orphan;
orphan->parent_ = parent; // set new parent!
}
toc(2,"insert");
toc(7,"orphans");
toc(4,"incremental");
return affectedKeysSet;
}
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
ISAM2Result ISAM2<CONDITIONAL, GRAPH>::update(
const GRAPH& newFactors, const Values& newTheta, const FastVector<size_t>& removeFactorIndices,
const boost::optional<FastSet<Key> >& constrainedKeys, bool force_relinearize) {
static const bool debug = ISDEBUG("ISAM2 update");
static const bool verbose = ISDEBUG("ISAM2 update verbose");
static int count = 0;
count++;
lastAffectedVariableCount = 0;
lastAffectedFactorCount = 0;
lastAffectedCliqueCount = 0;
lastAffectedMarkedCount = 0;
lastBacksubVariableCount = 0;
lastNnzTop = 0;
ISAM2Result result;
const bool relinearizeThisStep = force_relinearize || (params_.enableRelinearization && count % params_.relinearizeSkip == 0);
if(verbose) {
cout << "ISAM2::update\n";
this->print("ISAM2: ");
}
// Update delta if we need it to check relinearization later
if(relinearizeThisStep) {
tic(0, "updateDelta");
updateDelta(disableReordering);
toc(0, "updateDelta");
}
tic(1,"push_back factors");
// Add the new factor indices to the result struct
result.newFactorsIndices.resize(newFactors.size());
for(size_t i=0; i<newFactors.size(); ++i)
result.newFactorsIndices[i] = i + nonlinearFactors_.size();
// 1. Add any new factors \Factors:=\Factors\cup\Factors'.
if(debug || verbose) newFactors.print("The new factors are: ");
nonlinearFactors_.push_back(newFactors);
// Remove the removed factors
GRAPH removeFactors; removeFactors.reserve(removeFactorIndices.size());
BOOST_FOREACH(size_t index, removeFactorIndices) {
removeFactors.push_back(nonlinearFactors_[index]);
nonlinearFactors_.remove(index);
}
// Remove removed factors from the variable index so we do not attempt to relinearize them
variableIndex_.remove(removeFactorIndices, *removeFactors.symbolic(ordering_));
toc(1,"push_back factors");
tic(2,"add new variables");
// 2. Initialize any new variables \Theta_{new} and add \Theta:=\Theta\cup\Theta_{new}.
Impl::AddVariables(newTheta, theta_, delta_, deltaReplacedMask_, ordering_, Base::nodes_);
toc(2,"add new variables");
tic(3,"evaluate error before");
if(params_.evaluateNonlinearError)
result.errorBefore.reset(nonlinearFactors_.error(calculateEstimate()));
toc(3,"evaluate error before");
tic(4,"gather involved keys");
// 3. Mark linear update
FastSet<Index> markedKeys = Impl::IndicesFromFactors(ordering_, newFactors); // Get keys from new factors
// Also mark keys involved in removed factors
{
FastSet<Index> markedRemoveKeys = Impl::IndicesFromFactors(ordering_, removeFactors); // Get keys involved in removed factors
markedKeys.insert(markedRemoveKeys.begin(), markedRemoveKeys.end()); // Add to the overall set of marked keys
}
// NOTE: we use assign instead of the iterator constructor here because this
// is a vector of size_t, so the constructor unintentionally resolves to
// vector(size_t count, Index value) instead of the iterator constructor.
FastVector<Index> newKeys; newKeys.assign(markedKeys.begin(), markedKeys.end()); // Make a copy of these, as we'll soon add to them
toc(4,"gather involved keys");
// Check relinearization if we're at the nth step, or we are using a looser loop relin threshold
if (relinearizeThisStep) {
tic(5,"gather relinearize keys");
vector<bool> markedRelinMask(ordering_.nVars(), false);
// 4. Mark keys in \Delta above threshold \beta: J=\{\Delta_{j}\in\Delta|\Delta_{j}\geq\beta\}.
FastSet<Index> relinKeys = Impl::CheckRelinearization(delta_, ordering_, params_.relinearizeThreshold);
if(disableReordering) relinKeys = Impl::CheckRelinearization(delta_, ordering_, 0.0); // This is used for debugging
// Add the variables being relinearized to the marked keys
BOOST_FOREACH(const Index j, relinKeys) { markedRelinMask[j] = true; }
markedKeys.insert(relinKeys.begin(), relinKeys.end());
toc(5,"gather relinearize keys");
tic(6,"fluid find_all");
// 5. Mark all cliques that involve marked variables \Theta_{J} and all their ancestors.
if (!relinKeys.empty() && this->root())
Impl::FindAll(this->root(), markedKeys, markedRelinMask); // add other cliques that have the marked ones in the separator
toc(6,"fluid find_all");
tic(7,"expmap");
// 6. Update linearization point for marked variables: \Theta_{J}:=\Theta_{J}+\Delta_{J}.
if (!relinKeys.empty())
Impl::ExpmapMasked(theta_, delta_, ordering_, markedRelinMask, delta_);
toc(7,"expmap");
result.variablesRelinearized = markedKeys.size();
#ifndef NDEBUG
lastRelinVariables_ = markedRelinMask;
#endif
} else {
result.variablesRelinearized = 0;
#ifndef NDEBUG
lastRelinVariables_ = vector<bool>(ordering_.nVars(), false);
#endif
}
tic(8,"linearize new");
tic(1,"linearize");
// 7. Linearize new factors
FactorGraph<GaussianFactor>::shared_ptr linearFactors = newFactors.linearize(theta_, ordering_);
toc(1,"linearize");
tic(2,"augment VI");
// Augment the variable index with the new factors
variableIndex_.augment(*linearFactors);
toc(2,"augment VI");
toc(8,"linearize new");
tic(9,"recalculate");
// 8. Redo top of Bayes tree
// Convert constrained symbols to indices
boost::optional<FastSet<Index> > constrainedIndices;
if(constrainedKeys) {
constrainedIndices.reset(FastSet<Index>());
BOOST_FOREACH(Key key, *constrainedKeys) {
constrainedIndices->insert(ordering_[key]);
}
}
boost::shared_ptr<FastSet<Index> > replacedKeys;
if(!markedKeys.empty() || !newKeys.empty())
replacedKeys = recalculate(markedKeys, newKeys, linearFactors, constrainedIndices, result);
// Update replaced keys mask (accumulates until back-substitution takes place)
if(replacedKeys) {
BOOST_FOREACH(const Index var, *replacedKeys) {
deltaReplacedMask_[var] = true; } }
toc(9,"recalculate");
//tic(9,"solve");
// 9. Solve
if(debug) delta_.print("delta_: ");
//toc(9,"solve");
tic(10,"evaluate error after");
if(params_.evaluateNonlinearError)
result.errorAfter.reset(nonlinearFactors_.error(calculateEstimate()));
toc(10,"evaluate error after");
result.cliques = this->nodes().size();
deltaUptodate_ = false;
return result;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
void ISAM2<CONDITIONAL, GRAPH>::updateDelta(bool forceFullSolve) const {
if(params_.optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) {
// If using Gauss-Newton, update with wildfireThreshold
const ISAM2GaussNewtonParams& gaussNewtonParams =
boost::get<ISAM2GaussNewtonParams>(params_.optimizationParams);
const double effectiveWildfireThreshold = forceFullSolve ? 0.0 : gaussNewtonParams.wildfireThreshold;
tic(0, "Wildfire update");
lastBacksubVariableCount = Impl::UpdateDelta(this->root(), deltaReplacedMask_, delta_, effectiveWildfireThreshold);
toc(0, "Wildfire update");
} else if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) {
// If using Dogleg, do a Dogleg step
const ISAM2DoglegParams& doglegParams =
boost::get<ISAM2DoglegParams>(params_.optimizationParams);
// Do one Dogleg iteration
tic(1, "Dogleg Iterate");
DoglegOptimizerImpl::IterationResult doglegResult = DoglegOptimizerImpl::Iterate(
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose);
toc(1, "Dogleg Iterate");
// Update Delta and linear step
doglegDelta_ = doglegResult.Delta;
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing the linear solution
// Clear replaced mask
deltaReplacedMask_.assign(deltaReplacedMask_.size(), false);
}
deltaUptodate_ = true;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
Values ISAM2<CONDITIONAL, GRAPH>::calculateEstimate() const {
// We use ExpmapMasked here instead of regular expmap because the former
// handles Permuted<VectorValues>
Values ret(theta_);
vector<bool> mask(ordering_.nVars(), true);
Impl::ExpmapMasked(ret, getDelta(), ordering_, mask);
return ret;
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
template<class VALUE>
VALUE ISAM2<CONDITIONAL, GRAPH>::calculateEstimate(Key key) const {
VALUE ISAM2::calculateEstimate(Key key) const {
const Index index = getOrdering()[key];
const SubVector delta = getDelta()[index];
return theta_.at<VALUE>(key).retract(delta);
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
Values ISAM2<CONDITIONAL, GRAPH>::calculateBestEstimate() const {
VectorValues delta(theta_.dims(ordering_));
optimize2(this->root(), delta);
return theta_.retract(delta, ordering_);
namespace internal {
template<class CLIQUE>
void optimizeWildfire(const boost::shared_ptr<CLIQUE>& clique, double threshold,
vector<bool>& changed, const vector<bool>& replaced, Permuted<VectorValues>& delta, int& count) {
// if none of the variables in this clique (frontal and separator!) changed
// significantly, then by the running intersection property, none of the
// cliques in the children need to be processed
// Are any clique variables part of the tree that has been redone?
bool cliqueReplaced = replaced[(*clique)->frontals().front()];
#ifndef NDEBUG
BOOST_FOREACH(Index frontal, (*clique)->frontals()) {
assert(cliqueReplaced == replaced[frontal]);
}
#endif
// If not redone, then has one of the separator variables changed significantly?
bool recalculate = cliqueReplaced;
if(!recalculate) {
BOOST_FOREACH(Index parent, (*clique)->parents()) {
if(changed[parent]) {
recalculate = true;
break;
}
}
}
// Solve clique if it was replaced, or if any parents were changed above the
// threshold or themselves replaced.
if(recalculate) {
// Temporary copy of the original values, to check how much they change
vector<Vector> originalValues((*clique)->nrFrontals());
GaussianConditional::const_iterator it;
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
originalValues[it - (*clique)->beginFrontals()] = delta[*it];
}
// Back-substitute
(*clique)->solveInPlace(delta);
count += (*clique)->nrFrontals();
// Whether the values changed above a threshold, or always true if the
// clique was replaced.
bool valuesChanged = cliqueReplaced;
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
if(!valuesChanged) {
const Vector& oldValue(originalValues[it - (*clique)->beginFrontals()]);
const SubVector& newValue(delta[*it]);
if((oldValue - newValue).lpNorm<Eigen::Infinity>() >= threshold) {
valuesChanged = true;
break;
}
} else
break;
}
// If the values were above the threshold or this clique was replaced
if(valuesChanged) {
// Set changed flag for each frontal variable and leave the new values
BOOST_FOREACH(Index frontal, (*clique)->frontals()) {
changed[frontal] = true;
}
} else {
// Replace with the old values
for(it = (*clique)->beginFrontals(); it!=(*clique)->endFrontals(); it++) {
delta[*it] = originalValues[it - (*clique)->beginFrontals()];
}
}
// Recurse to children
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
optimizeWildfire(child, threshold, changed, replaced, delta, count);
}
}
}
}
/* ************************************************************************* */
template<class CONDITIONAL, class GRAPH>
const Permuted<VectorValues>& ISAM2<CONDITIONAL, GRAPH>::getDelta() const {
if(!deltaUptodate_)
updateDelta();
return delta_;
template<class CLIQUE>
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root, double threshold, const vector<bool>& keys, Permuted<VectorValues>& delta) {
vector<bool> changed(keys.size(), false);
int count = 0;
// starting from the root, call optimize on each conditional
if(root)
internal::optimizeWildfire(root, threshold, changed, keys, delta, count);
return count;
}
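/* Usage sketch (mirrors the call in Impl::UpdateDoglegDeltas): back-substitute
 * only the cliques whose variables were re-eliminated, or whose parents changed
 * by more than the threshold:
 *   int nUpdated = optimizeWildfire(isam.root(), wildfireThreshold, replacedKeys, deltaNewton);
 */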
/* ************************************************************************* */
template<class CLIQUE>
void nnz_internal(const boost::shared_ptr<CLIQUE>& clique, int& result) {
int dimR = (*clique)->dim();
int dimSep = (*clique)->get_S().cols() - dimR;
result += ((dimR+1)*dimR)/2 + dimSep*dimR;
// traverse the children
BOOST_FOREACH(const typename CLIQUE::shared_ptr& child, clique->children_) {
nnz_internal(child, result);
}
}
/* ************************************************************************* */
template<class CLIQUE>
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique) {
int result = 0;
// starting from the root, add up entries of frontal and conditional matrices of each conditional
nnz_internal(clique, result);
return result;
}
}
/// namespace gtsam

749
gtsam/nonlinear/ISAM2.cpp Normal file
View File

@ -0,0 +1,749 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file ISAM2.cpp
* @brief Incremental update functionality (ISAM2) for BayesTree, with fluid relinearization.
* @author Michael Kaess, Richard Roberts
*/
#include <boost/foreach.hpp>
#include <boost/assign/std/list.hpp> // for operator +=
using namespace boost::assign;
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm.hpp>
#include <gtsam/base/timing.h>
#include <gtsam/base/debug.h>
#include <gtsam/linear/GaussianJunctionTree.h>
#include <gtsam/inference/BayesTree-inl.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
namespace gtsam {
using namespace std;
static const bool disableReordering = false;
static const double batchThreshold = 0.65;
/* ************************************************************************* */
ISAM2::ISAM2(const ISAM2Params& params):
delta_(deltaUnpermuted_), deltaNewton_(deltaNewtonUnpermuted_), RgProd_(RgProdUnpermuted_),
deltaDoglegUptodate_(true), deltaUptodate_(true), params_(params) {
// See note in gtsam/base/boost_variant_with_workaround.h
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
}
/* ************************************************************************* */
ISAM2::ISAM2():
delta_(deltaUnpermuted_), deltaNewton_(deltaNewtonUnpermuted_), RgProd_(RgProdUnpermuted_),
deltaDoglegUptodate_(true), deltaUptodate_(true) {
// See note in gtsam/base/boost_variant_with_workaround.h
if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams))
doglegDelta_ = boost::get<ISAM2DoglegParams>(params_.optimizationParams).initialDelta;
}
/* ************************************************************************* */
FastList<size_t> ISAM2::getAffectedFactors(const FastList<Index>& keys) const {
static const bool debug = false;
if(debug) cout << "Getting affected factors for ";
if(debug) { BOOST_FOREACH(const Index key, keys) { cout << key << " "; } }
if(debug) cout << endl;
FactorGraph<NonlinearFactor > allAffected;
FastList<size_t> indices;
BOOST_FOREACH(const Index key, keys) {
// const list<size_t> l = nonlinearFactors_.factors(key);
// indices.insert(indices.begin(), l.begin(), l.end());
const VariableIndex::Factors& factors(variableIndex_[key]);
BOOST_FOREACH(size_t factor, factors) {
if(debug) cout << "Variable " << key << " affects factor " << factor << endl;
indices.push_back(factor);
}
}
indices.sort();
indices.unique();
if(debug) cout << "Affected factors are: ";
if(debug) { BOOST_FOREACH(const size_t index, indices) { cout << index << " "; } }
if(debug) cout << endl;
return indices;
}
/* ************************************************************************* */
// retrieve all factors that ONLY contain the affected variables
// (note that the remaining stuff is summarized in the cached factors)
FactorGraph<GaussianFactor>::shared_ptr
ISAM2::relinearizeAffectedFactors(const FastList<Index>& affectedKeys) const {
tic(1,"getAffectedFactors");
FastList<size_t> candidates = getAffectedFactors(affectedKeys);
toc(1,"getAffectedFactors");
NonlinearFactorGraph nonlinearAffectedFactors;
tic(2,"affectedKeysSet");
// for fast lookup below
FastSet<Index> affectedKeysSet;
affectedKeysSet.insert(affectedKeys.begin(), affectedKeys.end());
toc(2,"affectedKeysSet");
tic(3,"check candidates");
BOOST_FOREACH(size_t idx, candidates) {
bool inside = true;
BOOST_FOREACH(Key key, nonlinearFactors_[idx]->keys()) {
Index var = ordering_[key];
if (affectedKeysSet.find(var) == affectedKeysSet.end()) {
inside = false;
break;
}
}
if (inside)
nonlinearAffectedFactors.push_back(nonlinearFactors_[idx]);
}
toc(3,"check candidates");
tic(4,"linearize");
FactorGraph<GaussianFactor>::shared_ptr linearized(nonlinearAffectedFactors.linearize(theta_, ordering_));
toc(4,"linearize");
return linearized;
}
/* ************************************************************************* */
// find intermediate (linearized) factors from cache that are passed into the affected area
GaussianFactorGraph ISAM2::getCachedBoundaryFactors(Cliques& orphans) {
static const bool debug = false;
GaussianFactorGraph cachedBoundary;
BOOST_FOREACH(sharedClique orphan, orphans) {
// find the last variable that was eliminated
Index key = (*orphan)->frontals().back();
#ifndef NDEBUG
// typename BayesNet<CONDITIONAL>::const_iterator it = orphan->end();
// const CONDITIONAL& lastCONDITIONAL = **(--it);
// typename CONDITIONAL::const_iterator keyit = lastCONDITIONAL.endParents();
// const Index lastKey = *(--keyit);
// assert(key == lastKey);
#endif
// retrieve the cached factor and add to boundary
cachedBoundary.push_back(orphan->cachedFactor());
if(debug) { cout << "Cached factor for variable " << key; orphan->cachedFactor()->print(""); }
}
return cachedBoundary;
}
boost::shared_ptr<FastSet<Index> > ISAM2::recalculate(
const FastSet<Index>& markedKeys, const FastVector<Index>& newKeys, const FactorGraph<GaussianFactor>::shared_ptr newFactors,
const boost::optional<FastMap<Index,int> >& constrainKeys, ISAM2Result& result) {
// TODO: new factors are linearized twice, the newFactors passed in are not used.
const bool debug = ISDEBUG("ISAM2 recalculate");
// Input: BayesTree(this), newFactors
//#define PRINT_STATS // figures for paper, disable for timing
#ifdef PRINT_STATS
static int counter = 0;
int maxClique = 0;
double avgClique = 0;
int numCliques = 0;
int nnzR = 0;
if (counter>0) { // cannot call on empty tree
GaussianISAM2_P::CliqueData cdata = this->getCliqueData();
GaussianISAM2_P::CliqueStats cstats = cdata.getStats();
maxClique = cstats.maxCONDITIONALSize;
avgClique = cstats.avgCONDITIONALSize;
numCliques = cdata.conditionalSizes.size();
nnzR = calculate_nnz(this->root());
}
counter++;
#endif
if(debug) {
cout << "markedKeys: ";
BOOST_FOREACH(const Index key, markedKeys) { cout << key << " "; }
cout << endl;
cout << "newKeys: ";
BOOST_FOREACH(const Index key, newKeys) { cout << key << " "; }
cout << endl;
}
// 1. Remove top of Bayes tree and convert to a factor graph:
// (a) For each affected variable, remove the corresponding clique and all parents up to the root.
// (b) Store orphaned sub-trees \BayesTree_{O} of removed cliques.
tic(1, "removetop");
Cliques orphans;
BayesNet<GaussianConditional> affectedBayesNet;
this->removeTop(markedKeys, affectedBayesNet, orphans);
toc(1, "removetop");
if(debug) affectedBayesNet.print("Removed top: ");
if(debug) orphans.print("Orphans: ");
// FactorGraph<GaussianFactor> factors(affectedBayesNet);
// Bug was here: we cannot reuse the original factors, because then the cached factors get messed up.
// [All the necessary data is actually contained in the affectedBayesNet, including what was passed in
// from the boundaries, so this would be correct; however, in the process we would also generate new
// cached_ entries that would be wrong (i.e. they would not contain what would be passed up at a certain
// point if batch elimination were done, which is what we need). We could choose not to update cached_
// from here, but then the new information (and potentially different variable ordering) would not be
// reflected in the cached_ values, which again would be wrong.]
// So instead we have to retrieve the original linearized factors AND add the cached factors from the boundary.
// BEGIN OF COPIED CODE
// ordering provides all keys in conditionals, there cannot be others because path to root included
tic(2,"affectedKeys");
FastList<Index> affectedKeys = affectedBayesNet.ordering();
toc(2,"affectedKeys");
if(affectedKeys.size() >= theta_.size() * batchThreshold) {
tic(3,"batch");
tic(0,"add keys");
boost::shared_ptr<FastSet<Index> > affectedKeysSet(new FastSet<Index>());
BOOST_FOREACH(const Ordering::value_type& key_index, ordering_) { affectedKeysSet->insert(key_index.second); }
toc(0,"add keys");
tic(1,"reorder");
tic(1,"CCOLAMD");
// Do a batch step - reorder and relinearize all variables
vector<int> cmember(theta_.size(), 0);
if(constrainKeys) {
if(!constrainKeys->empty()) {
typedef std::pair<const Index,int> Index_Group;
if(theta_.size() > constrainKeys->size()) { // Only if some variables are unconstrained
BOOST_FOREACH(const Index_Group& index_group, *constrainKeys) {
cmember[index_group.first] = index_group.second; }
} else {
int minGroup = *boost::range::min_element(boost::adaptors::values(*constrainKeys));
BOOST_FOREACH(const Index_Group& index_group, *constrainKeys) {
cmember[index_group.first] = index_group.second - minGroup; }
}
}
} else {
if(theta_.size() > newKeys.size()) // Only if some variables are unconstrained
BOOST_FOREACH(Index var, newKeys) { cmember[var] = 1; }
}
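// (For reference: constrained COLAMD interprets cmember as ordered constraint
// sets, with columns in lower-numbered sets eliminated first; this is why the
// constrainKeys branch above shifts the groups by minGroup so that the
// smallest group maps to 0.)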
Permutation::shared_ptr colamd(inference::PermutationCOLAMD_(variableIndex_, cmember));
Permutation::shared_ptr colamdInverse(colamd->inverse());
toc(1,"CCOLAMD");
// Reorder
tic(2,"permute global variable index");
variableIndex_.permute(*colamd);
toc(2,"permute global variable index");
tic(3,"permute delta");
delta_.permute(*colamd);
deltaNewton_.permute(*colamd);
RgProd_.permute(*colamd);
toc(3,"permute delta");
tic(4,"permute ordering");
ordering_.permuteWithInverse(*colamdInverse);
toc(4,"permute ordering");
toc(1,"reorder");
tic(2,"linearize");
GaussianFactorGraph factors(*nonlinearFactors_.linearize(theta_, ordering_));
toc(2,"linearize");
tic(5,"eliminate");
JunctionTree<GaussianFactorGraph, Base::Clique> jt(factors, variableIndex_);
sharedClique newRoot;
if(params_.factorization == ISAM2Params::LDL)
newRoot = jt.eliminate(EliminatePreferLDL);
else if(params_.factorization == ISAM2Params::QR)
newRoot = jt.eliminate(EliminateQR);
else assert(false);
if(debug) newRoot->print("Eliminated: ");
toc(5,"eliminate");
tic(6,"insert");
this->clear();
this->insert(newRoot);
toc(6,"insert");
toc(3,"batch");
result.variablesReeliminated = affectedKeysSet->size();
lastAffectedMarkedCount = markedKeys.size();
lastAffectedVariableCount = affectedKeysSet->size();
lastAffectedFactorCount = factors.size();
return affectedKeysSet;
} else {
tic(4,"incremental");
// 2. Add the new factors \Factors' into the resulting factor graph
FastList<Index> affectedAndNewKeys;
affectedAndNewKeys.insert(affectedAndNewKeys.end(), affectedKeys.begin(), affectedKeys.end());
affectedAndNewKeys.insert(affectedAndNewKeys.end(), newKeys.begin(), newKeys.end());
tic(1,"relinearizeAffected");
GaussianFactorGraph factors(*relinearizeAffectedFactors(affectedAndNewKeys));
if(debug) factors.print("Relinearized factors: ");
toc(1,"relinearizeAffected");
if(debug) { cout << "Affected keys: "; BOOST_FOREACH(const Index key, affectedKeys) { cout << key << " "; } cout << endl; }
result.variablesReeliminated = affectedAndNewKeys.size();
lastAffectedMarkedCount = markedKeys.size();
lastAffectedVariableCount = affectedKeys.size();
lastAffectedFactorCount = factors.size();
#ifdef PRINT_STATS
// output for generating figures
cout << "linear: #markedKeys: " << markedKeys.size() << " #affectedVariables: " << affectedKeys.size()
<< " #affectedFactors: " << factors.size() << " maxCliqueSize: " << maxClique
<< " avgCliqueSize: " << avgClique << " #Cliques: " << numCliques << " nnzR: " << nnzR << endl;
#endif
tic(2,"cached");
// add the cached intermediate results from the boundary of the orphans ...
GaussianFactorGraph cachedBoundary = getCachedBoundaryFactors(orphans);
if(debug) cachedBoundary.print("Boundary factors: ");
factors.reserve(factors.size() + cachedBoundary.size());
// Copy so that we can later permute factors
BOOST_FOREACH(const GaussianFactor::shared_ptr& cached, cachedBoundary) {
factors.push_back(cached->clone());
}
toc(2,"cached");
// END OF COPIED CODE
// 3. Re-order and eliminate the factor graph into a Bayes net (Algorithm [alg:eliminate]), and re-assemble into a new Bayes tree (Algorithm [alg:BayesTree])
tic(4,"reorder and eliminate");
tic(1,"list to set");
// create a partial reordering for the new and contaminated factors
// markedKeys are passed in: those variables will be forced to the end in the ordering
boost::shared_ptr<FastSet<Index> > affectedKeysSet(new FastSet<Index>(markedKeys));
affectedKeysSet->insert(affectedKeys.begin(), affectedKeys.end());
toc(1,"list to set");
tic(2,"PartialSolve");
Impl::ReorderingMode reorderingMode;
reorderingMode.nFullSystemVars = ordering_.nVars();
reorderingMode.algorithm = Impl::ReorderingMode::COLAMD;
reorderingMode.constrain = Impl::ReorderingMode::CONSTRAIN_LAST;
if(constrainKeys) {
reorderingMode.constrainedKeys = *constrainKeys;
} else {
reorderingMode.constrainedKeys = FastMap<Index,int>();
BOOST_FOREACH(Index var, newKeys) { reorderingMode.constrainedKeys->insert(make_pair(var, 1)); }
}
Impl::PartialSolveResult partialSolveResult =
Impl::PartialSolve(factors, *affectedKeysSet, reorderingMode, (params_.factorization == ISAM2Params::QR));
toc(2,"PartialSolve");
// We now need to permute everything according this partial reordering: the
// delta vector, the global ordering, and the factors we're about to
// re-eliminate. The reordered variables are also mentioned in the
// orphans and the leftover cached factors.
tic(3,"permute global variable index");
variableIndex_.permute(partialSolveResult.fullReordering);
toc(3,"permute global variable index");
tic(4,"permute delta");
delta_.permute(partialSolveResult.fullReordering);
deltaNewton_.permute(partialSolveResult.fullReordering);
RgProd_.permute(partialSolveResult.fullReordering);
toc(4,"permute delta");
tic(5,"permute ordering");
ordering_.permuteWithInverse(partialSolveResult.fullReorderingInverse);
toc(5,"permute ordering");
toc(4,"reorder and eliminate");
tic(6,"re-assemble");
if(partialSolveResult.bayesTree) {
assert(!this->root_);
this->insert(partialSolveResult.bayesTree);
}
toc(6,"re-assemble");
// 4. Insert the orphans back into the new Bayes tree.
tic(7,"orphans");
tic(1,"permute");
BOOST_FOREACH(sharedClique orphan, orphans) {
(void)orphan->permuteSeparatorWithInverse(partialSolveResult.fullReorderingInverse);
}
toc(1,"permute");
tic(2,"insert");
// add orphans to the bottom of the new tree
BOOST_FOREACH(sharedClique orphan, orphans) {
// Because the affectedKeysSelector is sorted, the orphan separator keys
// will be sorted correctly according to the new elimination order after
// applying the permutation, so findParentClique, which looks for the
// lowest-ordered parent, will still work.
Index parentRepresentative = Base::findParentClique((*orphan)->parents());
sharedClique parent = (*this)[parentRepresentative];
parent->children_ += orphan;
orphan->parent_ = parent; // set new parent!
}
toc(2,"insert");
toc(7,"orphans");
toc(4,"incremental");
return affectedKeysSet;
}
}
/* ************************************************************************* */
ISAM2Result ISAM2::update(
const NonlinearFactorGraph& newFactors, const Values& newTheta, const FastVector<size_t>& removeFactorIndices,
const boost::optional<FastMap<Key,int> >& constrainedKeys, bool force_relinearize) {
const bool debug = ISDEBUG("ISAM2 update");
const bool verbose = ISDEBUG("ISAM2 update verbose");
static int count = 0;
count++;
lastAffectedVariableCount = 0;
lastAffectedFactorCount = 0;
lastAffectedCliqueCount = 0;
lastAffectedMarkedCount = 0;
lastBacksubVariableCount = 0;
lastNnzTop = 0;
ISAM2Result result;
const bool relinearizeThisStep = force_relinearize || (params_.enableRelinearization && count % params_.relinearizeSkip == 0);
if(verbose) {
cout << "ISAM2::update\n";
this->print("ISAM2: ");
}
// Update delta if we need it to check relinearization later
if(relinearizeThisStep) {
tic(0, "updateDelta");
updateDelta(disableReordering);
toc(0, "updateDelta");
}
tic(1,"push_back factors");
// Add the new factor indices to the result struct
result.newFactorsIndices.resize(newFactors.size());
for(size_t i=0; i<newFactors.size(); ++i)
result.newFactorsIndices[i] = i + nonlinearFactors_.size();
// 1. Add any new factors \Factors:=\Factors\cup\Factors'.
if(debug || verbose) newFactors.print("The new factors are: ");
nonlinearFactors_.push_back(newFactors);
// Remove the removed factors
NonlinearFactorGraph removeFactors; removeFactors.reserve(removeFactorIndices.size());
BOOST_FOREACH(size_t index, removeFactorIndices) {
removeFactors.push_back(nonlinearFactors_[index]);
nonlinearFactors_.remove(index);
}
// Remove removed factors from the variable index so we do not attempt to relinearize them
variableIndex_.remove(removeFactorIndices, *removeFactors.symbolic(ordering_));
toc(1,"push_back factors");
tic(2,"add new variables");
// 2. Initialize any new variables \Theta_{new} and add \Theta:=\Theta\cup\Theta_{new}.
Impl::AddVariables(newTheta, theta_, delta_, deltaNewton_, RgProd_, deltaReplacedMask_, ordering_, Base::nodes_);
toc(2,"add new variables");
tic(3,"evaluate error before");
if(params_.evaluateNonlinearError)
result.errorBefore.reset(nonlinearFactors_.error(calculateEstimate()));
toc(3,"evaluate error before");
tic(4,"gather involved keys");
// 3. Mark linear update
FastSet<Index> markedKeys = Impl::IndicesFromFactors(ordering_, newFactors); // Get keys from new factors
// Also mark keys involved in removed factors
{
FastSet<Index> markedRemoveKeys = Impl::IndicesFromFactors(ordering_, removeFactors); // Get keys involved in removed factors
markedKeys.insert(markedRemoveKeys.begin(), markedRemoveKeys.end()); // Add to the overall set of marked keys
}
// NOTE: we use assign instead of the iterator constructor here because this
// is a vector of size_t, so the constructor unintentionally resolves to
// vector(size_t count, Index value) instead of the iterator constructor.
FastVector<Index> newKeys; newKeys.assign(markedKeys.begin(), markedKeys.end()); // Make a copy of these, as we'll soon add to them
toc(4,"gather involved keys");
// Check relinearization if we're at the nth step, or we are using a looser loop relin threshold
if (relinearizeThisStep) {
tic(5,"gather relinearize keys");
vector<bool> markedRelinMask(ordering_.nVars(), false);
// 4. Mark keys in \Delta above threshold \beta: J=\{\Delta_{j}\in\Delta|\Delta_{j}\geq\beta\}.
FastSet<Index> relinKeys = Impl::CheckRelinearization(delta_, ordering_, params_.relinearizeThreshold);
if(disableReordering) relinKeys = Impl::CheckRelinearization(delta_, ordering_, 0.0); // This is used for debugging
// Add the variables being relinearized to the marked keys
BOOST_FOREACH(const Index j, relinKeys) { markedRelinMask[j] = true; }
markedKeys.insert(relinKeys.begin(), relinKeys.end());
toc(5,"gather relinearize keys");
tic(6,"fluid find_all");
// 5. Mark all cliques that involve marked variables \Theta_{J} and all their ancestors.
if (!relinKeys.empty() && this->root())
Impl::FindAll(this->root(), markedKeys, markedRelinMask); // add other cliques that have the marked ones in the separator
toc(6,"fluid find_all");
tic(7,"expmap");
// 6. Update linearization point for marked variables: \Theta_{J}:=\Theta_{J}+\Delta_{J}.
if (!relinKeys.empty())
Impl::ExpmapMasked(theta_, delta_, ordering_, markedRelinMask, delta_);
toc(7,"expmap");
result.variablesRelinearized = markedKeys.size();
#ifndef NDEBUG
lastRelinVariables_ = markedRelinMask;
#endif
} else {
result.variablesRelinearized = 0;
#ifndef NDEBUG
lastRelinVariables_ = vector<bool>(ordering_.nVars(), false);
#endif
}
tic(8,"linearize new");
tic(1,"linearize");
// 7. Linearize new factors
FactorGraph<GaussianFactor>::shared_ptr linearFactors = newFactors.linearize(theta_, ordering_);
toc(1,"linearize");
tic(2,"augment VI");
// Augment the variable index with the new factors
variableIndex_.augment(*linearFactors);
toc(2,"augment VI");
toc(8,"linearize new");
tic(9,"recalculate");
// 8. Redo top of Bayes tree
// Convert constrained symbols to indices
boost::optional<FastMap<Index,int> > constrainedIndices;
if(constrainedKeys) {
constrainedIndices = FastMap<Index,int>();
typedef pair<const Key, int> Key_Group;
BOOST_FOREACH(Key_Group key_group, *constrainedKeys) {
constrainedIndices->insert(make_pair(ordering_[key_group.first], key_group.second));
}
}
boost::shared_ptr<FastSet<Index> > replacedKeys;
if(!markedKeys.empty() || !newKeys.empty())
replacedKeys = recalculate(markedKeys, newKeys, linearFactors, constrainedIndices, result);
// Update replaced keys mask (accumulates until back-substitution takes place)
if(replacedKeys) {
BOOST_FOREACH(const Index var, *replacedKeys) {
  deltaReplacedMask_[var] = true;
}
}
toc(9,"recalculate");
//tic(9,"solve");
// 9. Solve
if(debug) delta_.print("delta_: ");
//toc(9,"solve");
tic(10,"evaluate error after");
if(params_.evaluateNonlinearError)
result.errorAfter.reset(nonlinearFactors_.error(calculateEstimate()));
toc(10,"evaluate error after");
result.cliques = this->nodes().size();
deltaDoglegUptodate_ = false;
deltaUptodate_ = false;
return result;
}
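A minimal usage sketch of the update/estimate cycle implemented above, closely mirroring the planarSLAM-based unit tests later in this commit (the ISAM2Params arguments, key types, addPrior/addOdometry helpers and noise models are taken from those tests; the function name and numeric values are illustrative placeholders):

  #include <gtsam/nonlinear/ISAM2.h>
  #include <gtsam/slam/planarSLAM.h>
  using namespace gtsam;

  void isam2UpdateSketch() {
    ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
    SharedDiagonal odoNoise = sharedSigmas(Vector_(3, 0.1, 0.1, 0.03));

    // Step 0: prior on the first pose
    planarSLAM::Graph newfactors;
    newfactors.addPrior(0, Pose2(0.0, 0.0, 0.0), odoNoise);
    Values init;
    init.insert(planarSLAM::PoseKey(0), Pose2(0.01, 0.01, 0.01));
    ISAM2Result result = isam.update(newfactors, init);  // incremental relinearization + elimination

    // Later steps: add odometry and new poses, then call update() again
    planarSLAM::Graph odo;
    odo.addOdometry(0, 1, Pose2(1.0, 0.0, 0.0), odoNoise);
    Values newPose;
    newPose.insert(planarSLAM::PoseKey(1), Pose2(1.1, -0.1, 0.01));
    isam.update(odo, newPose);

    Values estimate = isam.calculateEstimate();  // back-substitute and retract the linearization point
  }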
/* ************************************************************************* */
void ISAM2::updateDelta(bool forceFullSolve) const {
if(params_.optimizationParams.type() == typeid(ISAM2GaussNewtonParams)) {
// If using Gauss-Newton, update with wildfireThreshold
const ISAM2GaussNewtonParams& gaussNewtonParams =
boost::get<ISAM2GaussNewtonParams>(params_.optimizationParams);
const double effectiveWildfireThreshold = forceFullSolve ? 0.0 : gaussNewtonParams.wildfireThreshold;
tic(0, "Wildfire update");
lastBacksubVariableCount = Impl::UpdateDelta(this->root(), deltaReplacedMask_, delta_, effectiveWildfireThreshold);
toc(0, "Wildfire update");
} else if(params_.optimizationParams.type() == typeid(ISAM2DoglegParams)) {
// If using Dogleg, do a Dogleg step
const ISAM2DoglegParams& doglegParams =
boost::get<ISAM2DoglegParams>(params_.optimizationParams);
// Do one Dogleg iteration
tic(1, "Dogleg Iterate");
DoglegOptimizerImpl::IterationResult doglegResult(DoglegOptimizerImpl::Iterate(
*doglegDelta_, doglegParams.adaptationMode, *this, nonlinearFactors_, theta_, ordering_, nonlinearFactors_.error(theta_), doglegParams.verbose));
toc(1, "Dogleg Iterate");
tic(2, "Copy dx_d");
// Update Delta and linear step
doglegDelta_ = doglegResult.Delta;
delta_.permutation() = Permutation::Identity(delta_.size()); // Dogleg solves for the full delta so there is no permutation
delta_.container() = doglegResult.dx_d; // Copy the VectorValues containing the linear solution
toc(2, "Copy dx_d");
}
deltaUptodate_ = true;
}
/* ************************************************************************* */
Values ISAM2::calculateEstimate() const {
// We use ExpmapMasked here instead of regular expmap because the former
// handles Permuted<VectorValues>
tic(1, "Copy Values");
Values ret(theta_);
toc(1, "Copy Values");
tic(2, "getDelta");
const Permuted<VectorValues>& delta(getDelta());
toc(2, "getDelta");
tic(3, "Expmap");
vector<bool> mask(ordering_.nVars(), true);
Impl::ExpmapMasked(ret, delta, ordering_, mask);
toc(3, "Expmap");
return ret;
}
/* ************************************************************************* */
Values ISAM2::calculateBestEstimate() const {
VectorValues delta(theta_.dims(ordering_));
internal::optimizeInPlace<Base>(this->root(), delta);
return theta_.retract(delta, ordering_);
}
/* ************************************************************************* */
const Permuted<VectorValues>& ISAM2::getDelta() const {
if(!deltaUptodate_)
updateDelta();
return delta_;
}
/* ************************************************************************* */
VectorValues optimize(const ISAM2& isam) {
tic(0, "allocateVectorValues");
VectorValues delta = *allocateVectorValues(isam);
toc(0, "allocateVectorValues");
optimizeInPlace(isam, delta);
return delta;
}
/* ************************************************************************* */
void optimizeInPlace(const ISAM2& isam, VectorValues& delta) {
// We may need to update the solution calculations
if(!isam.deltaDoglegUptodate_) {
tic(1, "UpdateDoglegDeltas");
double wildfireThreshold = 0.0;
if(isam.params().optimizationParams.type() == typeid(ISAM2GaussNewtonParams))
wildfireThreshold = boost::get<ISAM2GaussNewtonParams>(isam.params().optimizationParams).wildfireThreshold;
else if(isam.params().optimizationParams.type() == typeid(ISAM2DoglegParams))
wildfireThreshold = boost::get<ISAM2DoglegParams>(isam.params().optimizationParams).wildfireThreshold;
else
assert(false);
ISAM2::Impl::UpdateDoglegDeltas(isam, wildfireThreshold, isam.deltaReplacedMask_, isam.deltaNewton_, isam.RgProd_);
isam.deltaDoglegUptodate_ = true;
toc(1, "UpdateDoglegDeltas");
}
tic(2, "copy delta");
delta = isam.deltaNewton_;
toc(2, "copy delta");
}
/* ************************************************************************* */
VectorValues optimizeGradientSearch(const ISAM2& isam) {
tic(0, "Allocate VectorValues");
VectorValues grad = *allocateVectorValues(isam);
toc(0, "Allocate VectorValues");
optimizeGradientSearchInPlace(isam, grad);
return grad;
}
/* ************************************************************************* */
void optimizeGradientSearchInPlace(const ISAM2& isam, VectorValues& grad) {
// We may need to update the solution calculations
if(!isam.deltaDoglegUptodate_) {
tic(1, "UpdateDoglegDeltas");
double wildfireThreshold = 0.0;
if(isam.params().optimizationParams.type() == typeid(ISAM2GaussNewtonParams))
wildfireThreshold = boost::get<ISAM2GaussNewtonParams>(isam.params().optimizationParams).wildfireThreshold;
else if(isam.params().optimizationParams.type() == typeid(ISAM2DoglegParams))
wildfireThreshold = boost::get<ISAM2DoglegParams>(isam.params().optimizationParams).wildfireThreshold;
else
assert(false);
ISAM2::Impl::UpdateDoglegDeltas(isam, wildfireThreshold, isam.deltaReplacedMask_, isam.deltaNewton_, isam.RgProd_);
isam.deltaDoglegUptodate_ = true;
toc(1, "UpdateDoglegDeltas");
}
tic(2, "Compute Gradient");
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
gradientAtZero(isam, grad);
double gradientSqNorm = grad.dot(grad);
toc(2, "Compute Gradient");
tic(3, "Compute minimizing step size");
// Compute minimizing step size
double RgNormSq = isam.RgProd_.container().vector().squaredNorm();
double step = -gradientSqNorm / RgNormSq;
toc(3, "Compute minimizing step size");
tic(4, "Compute point");
// Compute steepest descent point
grad.vector() *= step;
toc(4, "Compute point");
}
/* ************************************************************************* */
VectorValues gradient(const ISAM2& bayesTree, const VectorValues& x0) {
return gradient(FactorGraph<JacobianFactor>(bayesTree), x0);
}
/* ************************************************************************* */
static void gradientAtZeroTreeAdder(const boost::shared_ptr<ISAM2Clique>& root, VectorValues& g) {
// Loop through variables in each clique, adding contributions
int variablePosition = 0;
for(GaussianConditional::const_iterator jit = root->conditional()->begin(); jit != root->conditional()->end(); ++jit) {
const int dim = root->conditional()->dim(jit);
g[*jit] += root->gradientContribution().segment(variablePosition, dim);
variablePosition += dim;
}
// Recursively add contributions from children
typedef boost::shared_ptr<ISAM2Clique> sharedClique;
BOOST_FOREACH(const sharedClique& child, root->children()) {
gradientAtZeroTreeAdder(child, g);
}
}
/* ************************************************************************* */
void gradientAtZero(const ISAM2& bayesTree, VectorValues& g) {
// Zero-out gradient
g.setZero();
// Sum up contributions for each clique
gradientAtZeroTreeAdder(bayesTree.root(), g);
}
} /// namespace gtsam

View File

@ -12,7 +12,7 @@
/**
* @file ISAM2.h
* @brief Incremental update functionality (ISAM2) for BayesTree, with fluid relinearization.
* @author Michael Kaess
* @author Michael Kaess, Richard Roberts
*/
// \callgraph
@ -21,10 +21,8 @@
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
#include <gtsam/inference/BayesTree.h>
#include <gtsam/linear/GaussianBayesTree.h>
// Workaround for boost < 1.47, see note in file
//#include <gtsam/base/boost_variant_with_workaround.h>
#include <boost/variant.hpp>
namespace gtsam {
@ -56,15 +54,18 @@ struct ISAM2GaussNewtonParams {
*/
struct ISAM2DoglegParams {
double initialDelta; ///< The initial trust region radius for Dogleg
double wildfireThreshold; ///< Continue updating the linear delta only when changes are above this threshold (default: 1e-5)
DoglegOptimizerImpl::TrustRegionAdaptationMode adaptationMode; ///< See description in DoglegOptimizerImpl::TrustRegionAdaptationMode
bool verbose; ///< Whether Dogleg prints iteration and convergence information
/** Specify parameters as constructor arguments */
ISAM2DoglegParams(
double _initialDelta = 1.0, ///< see ISAM2DoglegParams public variables, ISAM2DoglegParams::initialDelta
DoglegOptimizerImpl::TrustRegionAdaptationMode _adaptationMode = DoglegOptimizerImpl::SEARCH_EACH_ITERATION, ///< see ISAM2DoglegParams public variables, ISAM2DoglegParams::adaptationMode
bool _verbose = false ///< see ISAM2DoglegParams public variables, ISAM2DoglegParams::verbose
) : initialDelta(_initialDelta), adaptationMode(_adaptationMode), verbose(_verbose) {}
double _initialDelta = 1.0, ///< see ISAM2DoglegParams::initialDelta
double _wildfireThreshold = 1e-5, ///< see ISAM2DoglegParams::wildfireThreshold
DoglegOptimizerImpl::TrustRegionAdaptationMode _adaptationMode = DoglegOptimizerImpl::SEARCH_EACH_ITERATION, ///< see ISAM2DoglegParams::adaptationMode
bool _verbose = false ///< see ISAM2DoglegParams::verbose
) : initialDelta(_initialDelta), wildfireThreshold(_wildfireThreshold),
adaptationMode(_adaptationMode), verbose(_verbose) {}
};
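As a hedged sketch, the Dogleg configuration is selected by passing an ISAM2DoglegParams as the optimizationParams member of ISAM2Params, as the slamlike_solution_dogleg test later in this commit does (the function name is illustrative; the wildfire threshold shown is simply the new default):

  #include <gtsam/nonlinear/ISAM2.h>
  using namespace gtsam;

  void doglegParamsSketch() {
    ISAM2DoglegParams dogleg(1.0 /*initialDelta*/, 1e-5 /*wildfireThreshold*/);
    ISAM2Params params(dogleg, 0.0 /*relinearizeThreshold*/, 0 /*relinearizeSkip*/, false);
    ISAM2 isam(params);
  }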
/**
@ -106,6 +107,17 @@ struct ISAM2Params {
bool evaluateNonlinearError; ///< Whether to evaluate the nonlinear error before and after the update, to return in ISAM2Result from update()
enum Factorization { LDL, QR };
/** Specifies whether to use QR or LDL numerical factorization (default: LDL).
* LDL is faster but potentially numerically unstable for poorly-conditioned problems, which can occur when
* uncertainty is very low in some variables (or dimensions of variables) and very high in others. QR is
 * slower but more numerically stable in poorly-conditioned problems. We suggest keeping the default LDL,
 * and switching to QR only if gtsam throws NegativeMatrixException even though your problem's Hessian is
 * actually positive definite. For positive-definite problems, accumulated numerical error can make the
 * problem numerically negative or indefinite as solving proceeds, especially when using LDL.
*/
Factorization factorization;
KeyFormatter keyFormatter; ///< A KeyFormatter for when keys are printed during debugging (default: DefaultKeyFormatter)
/** Specify parameters as constructor arguments */
@ -115,10 +127,12 @@ struct ISAM2Params {
int _relinearizeSkip = 10, ///< see ISAM2Params::relinearizeSkip
bool _enableRelinearization = true, ///< see ISAM2Params::enableRelinearization
bool _evaluateNonlinearError = false, ///< see ISAM2Params::evaluateNonlinearError
Factorization _factorization = ISAM2Params::LDL, ///< see ISAM2Params::factorization
const KeyFormatter& _keyFormatter = DefaultKeyFormatter ///< see ISAM2::Params::keyFormatter
) : optimizationParams(_optimizationParams), relinearizeThreshold(_relinearizeThreshold),
relinearizeSkip(_relinearizeSkip), enableRelinearization(_enableRelinearization),
evaluateNonlinearError(_evaluateNonlinearError), keyFormatter(_keyFormatter) {}
evaluateNonlinearError(_evaluateNonlinearError), factorization(_factorization),
keyFormatter(_keyFormatter) {}
};
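A hedged sketch of selecting QR factorization through the positional constructor, mirroring the *_qr tests later in this commit (the function name is illustrative):

  #include <gtsam/nonlinear/ISAM2.h>
  using namespace gtsam;

  void qrParamsSketch() {
    // Positional arguments: optimizationParams, relinearizeThreshold, relinearizeSkip,
    // enableRelinearization, evaluateNonlinearError, factorization
    ISAM2Params params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false, false, ISAM2Params::QR);
    ISAM2 isam(params);
  }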
/**
@ -181,17 +195,16 @@ struct ISAM2Result {
FastVector<size_t> newFactorsIndices;
};
template<class CONDITIONAL>
struct ISAM2Clique : public BayesTreeCliqueBase<ISAM2Clique<CONDITIONAL>, CONDITIONAL> {
struct ISAM2Clique : public BayesTreeCliqueBase<ISAM2Clique, GaussianConditional> {
typedef ISAM2Clique<CONDITIONAL> This;
typedef BayesTreeCliqueBase<This,CONDITIONAL> Base;
typedef ISAM2Clique This;
typedef BayesTreeCliqueBase<This,GaussianConditional> Base;
typedef boost::shared_ptr<This> shared_ptr;
typedef boost::weak_ptr<This> weak_ptr;
typedef CONDITIONAL ConditionalType;
typedef typename ConditionalType::shared_ptr sharedConditional;
typedef GaussianConditional ConditionalType;
typedef ConditionalType::shared_ptr sharedConditional;
typename Base::FactorType::shared_ptr cachedFactor_;
Base::FactorType::shared_ptr cachedFactor_;
Vector gradientContribution_;
/** Construct from a conditional */
@ -199,7 +212,7 @@ struct ISAM2Clique : public BayesTreeCliqueBase<ISAM2Clique<CONDITIONAL>, CONDIT
throw runtime_error("ISAM2Clique should always be constructed with the elimination result constructor"); }
/** Construct from an elimination result */
ISAM2Clique(const std::pair<sharedConditional, boost::shared_ptr<typename ConditionalType::FactorType> >& result) :
ISAM2Clique(const std::pair<sharedConditional, boost::shared_ptr<ConditionalType::FactorType> >& result) :
Base(result.first), cachedFactor_(result.second), gradientContribution_(result.first->get_R().cols() + result.first->get_S().cols()) {
// Compute gradient contribution
const ConditionalType& conditional(*result.first);
@ -211,13 +224,13 @@ struct ISAM2Clique : public BayesTreeCliqueBase<ISAM2Clique<CONDITIONAL>, CONDIT
shared_ptr clone() const {
shared_ptr copy(new ISAM2Clique(make_pair(
sharedConditional(new ConditionalType(*Base::conditional_)),
cachedFactor_ ? cachedFactor_->clone() : typename Base::FactorType::shared_ptr())));
cachedFactor_ ? cachedFactor_->clone() : Base::FactorType::shared_ptr())));
copy->gradientContribution_ = gradientContribution_;
return copy;
}
/** Access the cached factor */
typename Base::FactorType::shared_ptr& cachedFactor() { return cachedFactor_; }
Base::FactorType::shared_ptr& cachedFactor() { return cachedFactor_; }
/** Access the gradient contribution */
const Vector& gradientContribution() const { return gradientContribution_; }
@ -269,8 +282,7 @@ private:
* estimate of all variables.
*
*/
template<class CONDITIONAL, class GRAPH = NonlinearFactorGraph>
class ISAM2: public BayesTree<CONDITIONAL, ISAM2Clique<CONDITIONAL> > {
class ISAM2: public BayesTree<GaussianConditional, ISAM2Clique> {
protected:
@ -296,6 +308,12 @@ protected:
*/
mutable Permuted<VectorValues> delta_;
VectorValues deltaNewtonUnpermuted_;
mutable Permuted<VectorValues> deltaNewton_;
VectorValues RgProdUnpermuted_;
mutable Permuted<VectorValues> RgProd_;
mutable bool deltaDoglegUptodate_;
/** Indicates whether the current delta is up-to-date, only used
* internally - delta will always be updated if necessary when it is
* requested with getDelta() or calculateEstimate().
@ -316,7 +334,7 @@ protected:
mutable std::vector<bool> deltaReplacedMask_;
/** All original nonlinear factors are stored here to use during relinearization */
GRAPH nonlinearFactors_;
NonlinearFactorGraph nonlinearFactors_;
/** The current elimination ordering Symbols to Index (integer) keys.
*
@ -335,13 +353,9 @@ private:
std::vector<bool> lastRelinVariables_;
#endif
typedef HessianFactor CacheFactor;
public:
typedef BayesTree<CONDITIONAL,ISAM2Clique<CONDITIONAL> > Base; ///< The BayesTree base class
typedef ISAM2<CONDITIONAL> This; ///< This class
typedef GRAPH Graph;
typedef BayesTree<GaussianConditional,ISAM2Clique> Base; ///< The BayesTree base class
/** Create an empty ISAM2 instance */
ISAM2(const ISAM2Params& params);
@ -349,17 +363,22 @@ public:
/** Create an empty ISAM2 instance using the default set of parameters (see ISAM2Params) */
ISAM2();
typedef typename Base::Clique Clique; ///< A clique
typedef typename Base::sharedClique sharedClique; ///< Shared pointer to a clique
typedef typename Base::Cliques Cliques; ///< List of Clique typedef from base class
typedef Base::Clique Clique; ///< A clique
typedef Base::sharedClique sharedClique; ///< Shared pointer to a clique
typedef Base::Cliques Cliques; ///< List of Clique typedef from base class
void cloneTo(boost::shared_ptr<This>& newISAM2) const {
void cloneTo(boost::shared_ptr<ISAM2>& newISAM2) const {
boost::shared_ptr<Base> bayesTree = boost::static_pointer_cast<Base>(newISAM2);
Base::cloneTo(bayesTree);
newISAM2->theta_ = theta_;
newISAM2->variableIndex_ = variableIndex_;
newISAM2->deltaUnpermuted_ = deltaUnpermuted_;
newISAM2->delta_ = delta_;
newISAM2->deltaNewtonUnpermuted_ = deltaNewtonUnpermuted_;
newISAM2->deltaNewton_ = deltaNewton_;
newISAM2->RgProdUnpermuted_ = RgProdUnpermuted_;
newISAM2->RgProd_ = RgProd_;
newISAM2->deltaDoglegUptodate_ = deltaDoglegUptodate_;
newISAM2->deltaUptodate_ = deltaUptodate_;
newISAM2->deltaReplacedMask_ = deltaReplacedMask_;
newISAM2->nonlinearFactors_ = nonlinearFactors_;
@ -395,9 +414,9 @@ public:
* (Params::relinearizeSkip).
* @return An ISAM2Result struct containing information about the update
*/
ISAM2Result update(const GRAPH& newFactors = GRAPH(), const Values& newTheta = Values(),
ISAM2Result update(const NonlinearFactorGraph& newFactors = NonlinearFactorGraph(), const Values& newTheta = Values(),
const FastVector<size_t>& removeFactorIndices = FastVector<size_t>(),
const boost::optional<FastSet<Key> >& constrainedKeys = boost::none,
const boost::optional<FastMap<Key,int> >& constrainedKeys = boost::none,
bool force_relinearize = false);
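A sketch of the new FastMap-based constrainedKeys argument, mirroring the constrained_ordering test below: keys are grouped, and constrained keys are pushed to the end of the elimination ordering. The helper name is hypothetical, and planarSLAM key types are borrowed from the tests purely for illustration:

  #include <gtsam/nonlinear/ISAM2.h>
  #include <gtsam/slam/planarSLAM.h>
  using namespace gtsam;

  void constrainedUpdateSketch(ISAM2& isam, const NonlinearFactorGraph& newFactors, const Values& newTheta) {
    // Constrain x3 and x4 to the end of the elimination ordering, in two separate groups
    FastMap<Key,int> constrained;
    constrained.insert(std::make_pair(planarSLAM::PoseKey(3), 1));
    constrained.insert(std::make_pair(planarSLAM::PoseKey(4), 2));
    isam.update(newFactors, newTheta, FastVector<size_t>(), constrained);
  }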
/** Access the current linearization point */
@ -432,7 +451,7 @@ public:
const Permuted<VectorValues>& getDelta() const;
/** Access the set of nonlinear factors */
const GRAPH& getFactorsUnsafe() const { return nonlinearFactors_; }
const NonlinearFactorGraph& getFactorsUnsafe() const { return nonlinearFactors_; }
/** Access the current ordering */
const Ordering& getOrdering() const { return ordering_; }
@ -452,16 +471,101 @@ private:
FastList<size_t> getAffectedFactors(const FastList<Index>& keys) const;
FactorGraph<GaussianFactor>::shared_ptr relinearizeAffectedFactors(const FastList<Index>& affectedKeys) const;
FactorGraph<CacheFactor> getCachedBoundaryFactors(Cliques& orphans);
GaussianFactorGraph getCachedBoundaryFactors(Cliques& orphans);
boost::shared_ptr<FastSet<Index> > recalculate(const FastSet<Index>& markedKeys,
const FastVector<Index>& newKeys, const FactorGraph<GaussianFactor>::shared_ptr newFactors,
const boost::optional<FastSet<size_t> >& constrainKeys, ISAM2Result& result);
const boost::optional<FastMap<Index,int> >& constrainKeys, ISAM2Result& result);
// void linear_update(const GaussianFactorGraph& newFactors);
void updateDelta(bool forceFullSolve = false) const;
friend void optimizeInPlace(const ISAM2&, VectorValues&);
friend void optimizeGradientSearchInPlace(const ISAM2&, VectorValues&);
}; // ISAM2
/** Get the linear delta for the ISAM2 object, unpermuting the delta returned by ISAM2::getDelta() */
VectorValues optimize(const ISAM2& isam);
/** Get the linear delta for the ISAM2 object, unpermuting the delta returned by ISAM2::getDelta() */
void optimizeInPlace(const ISAM2& isam, VectorValues& delta);
/// Optimize the BayesTree, starting from the root.
/// @param replaced Needs to contain
/// all variables that are contained in the top of the Bayes tree that has been
/// redone.
/// @param delta The current solution, an offset from the linearization
/// point.
/// @param threshold The maximum change against the PREVIOUS delta for
/// non-replaced variables that can be ignored, ie. the old delta entry is kept
/// and recursive backsubstitution might eventually stop if none of the changed
/// variables are contained in the subtree.
/// @return The number of variables that were solved for
template<class CLIQUE>
int optimizeWildfire(const boost::shared_ptr<CLIQUE>& root,
double threshold, const std::vector<bool>& replaced, Permuted<VectorValues>& delta);
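A hedged sketch of driving this entry point through a hypothetical helper (inside ISAM2 itself the analogous work is done via Impl::UpdateDelta with deltaReplacedMask_ and delta_; a threshold of 0.0 forces a full back-substitution):

  #include <gtsam/nonlinear/ISAM2.h>
  #include <vector>
  using namespace gtsam;

  // Hypothetical helper: re-solve only the changed part of the tree, or everything if forceFull is set.
  int solveDelta(const ISAM2& isam, const std::vector<bool>& replacedMask,
                 Permuted<VectorValues>& delta, bool forceFull) {
    const double threshold = forceFull ? 0.0 : 0.001;  // 0.001 is an arbitrary example threshold
    return optimizeWildfire(isam.root(), threshold, replacedMask, delta);
  }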
/**
* Optimize along the gradient direction, with a closed-form computation to
* perform the line search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized
* problem. The error function of a GaussianBayesNet is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x + \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the
* gradient evaluated at \f$ g = g(\delta x = 0) \f$ with step size
* \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g) \f$:
*
 * \f[ f(\alpha) = \frac{1}{2} d^T d + \alpha \, g^T g + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Optimizing by setting the derivative to zero yields
* \f$ \hat \alpha = (-g^T g) / (g^T G g) \f$. For efficiency, this function
* evaluates the denominator without computing the Hessian \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f]
*/
VectorValues optimizeGradientSearch(const ISAM2& isam);
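A minimal sketch comparing the two solution paths (the function name is illustrative; it assumes an ISAM2 instance that has already been updated at least once):

  #include <gtsam/nonlinear/ISAM2.h>
  using namespace gtsam;

  void compareSteps(const ISAM2& isam) {
    VectorValues dxNewton = optimize(isam);                // Gauss-Newton step via back-substitution
    VectorValues dxGrad   = optimizeGradientSearch(isam);  // steepest-descent step about delta x = 0
    // Both are expressed about the current linearization point; the Dogleg machinery combines
    // steps of these two types to form the trust-region step.
  }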
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
void optimizeGradientSearchInPlace(const ISAM2& isam, VectorValues& grad);
/// calculate the number of non-zero entries for the tree starting at clique (use root for complete matrix)
template<class CLIQUE>
int calculate_nnz(const boost::shared_ptr<CLIQUE>& clique);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around \f$ x = x_0 \f$.
* The gradient is \f$ R^T(Rx-d) \f$.
* This specialized version is used with ISAM2, where each clique stores its
* gradient contribution.
 * @param bayesTree The Gaussian Bayes Tree \f$ (R,d) \f$
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues
*/
VectorValues gradient(const ISAM2& bayesTree, const VectorValues& x0);
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d \right\Vert^2 \f$,
* centered around zero.
* The gradient about zero is \f$ -R^T d \f$. See also gradient(const GaussianBayesNet&, const VectorValues&).
* This specialized version is used with ISAM2, where each clique stores its
* gradient contribution.
 * @param bayesTree The Gaussian Bayes Tree \f$ (R,d) \f$
* @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
* @return The gradient as a VectorValues
*/
void gradientAtZero(const ISAM2& bayesTree, VectorValues& g);
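A short usage sketch of these two gradient entry points (the function name is illustrative; allocateVectorValues is assumed to be visible through the same headers the unit tests in this commit use, and, as documented above, the output of gradientAtZero must be pre-allocated):

  #include <gtsam/nonlinear/ISAM2.h>
  using namespace gtsam;

  void gradientSketch(const ISAM2& isam, const VectorValues& x0) {
    VectorValues g0 = *allocateVectorValues(isam);  // pre-allocate to the tree's variable dimensions
    gradientAtZero(isam, g0);                       // g0 = -R^T d, summed from per-clique contributions
    VectorValues gx = gradient(isam, x0);           // R^T (R x0 - d)
  }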
} /// namespace gtsam
#include <gtsam/nonlinear/ISAM2-inl.h>
#include <gtsam/nonlinear/ISAM2-impl.h>

View File

@ -120,6 +120,8 @@ namespace gtsam {
typedef boost::transform_iterator<
boost::function1<ConstKeyValuePair, const ConstKeyValuePtrPair&>, KeyValueMap::const_reverse_iterator> const_reverse_iterator;
typedef KeyValuePair value_type;
private:
template<class ValueType>
struct _KeyValuePair {
@ -146,6 +148,7 @@ namespace gtsam {
/** A key-value pair, with the value a specific derived Value type. */
typedef _KeyValuePair<ValueType> KeyValuePair;
typedef _ConstKeyValuePair<ValueType> ConstKeyValuePair;
typedef KeyValuePair value_type;
typedef
boost::transform_iterator<
@ -211,6 +214,7 @@ namespace gtsam {
public:
/** A const key-value pair, with the value a specific derived Value type. */
typedef _ConstKeyValuePair<ValueType> KeyValuePair;
typedef KeyValuePair value_type;
typedef typename Filtered<ValueType>::const_const_iterator iterator;
typedef typename Filtered<ValueType>::const_const_iterator const_iterator;

View File

@ -13,12 +13,18 @@ set(slam_local_libs
ccolamd
)
# Files to exclude from compilation of tests and timing scripts
set(slam_excluded_files
# "${CMAKE_CURRENT_SOURCE_DIR}/tests/testTypedDiscreteFactor.cpp" # Example of excluding a test
"" # Add to this list, with full path, to exclude
)
# Build tests
if (GTSAM_BUILD_TESTS)
gtsam_add_tests(slam "${slam_local_libs}")
gtsam_add_subdir_tests(slam "${slam_local_libs}" "gtsam-static" "${slam_excluded_files}")
endif(GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
gtsam_add_timing(slam "${slam_local_libs}")
gtsam_add_subdir_timing(slam "${slam_local_libs}" "gtsam-static" "${slam_excluded_files}")
endif(GTSAM_BUILD_TIMING)

View File

@ -1,16 +1,11 @@
if (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
set(convenience_libs
slam
nonlinear
linear
inference
geometry
base
ccolamd)
else (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
set(convenience_libs
gtsam-static)
endif (GTSAM_BUILD_CONVENIENCE_LIBRARIES)
set(tests_local_libs
slam
nonlinear
linear
inference
geometry
base
ccolamd)
# exclude certain files
# note the source dir on each
@ -20,35 +15,26 @@ set (tests_exclude
# Build tests
if (GTSAM_BUILD_TESTS)
# Subdirectory target for tests
add_custom_target(check.tests COMMAND ${CMAKE_CTEST_COMMAND})
file(GLOB tests_srcs "test*.cpp")
list(REMOVE_ITEM tests_srcs ${tests_exclude})
foreach(test_src ${tests_srcs})
get_filename_component(test_base ${test_src} NAME_WE)
set( test_bin ${test_base} )
message(STATUS "Adding Test ${test_bin}")
add_executable(${test_bin} ${test_src})
add_dependencies(check.tests ${test_bin})
add_dependencies(check ${test_bin})
add_test(${test_base} ${EXECUTABLE_OUTPUT_PATH}${test_bin})
target_link_libraries(${test_bin} ${convenience_libs} CppUnitLite ${Boost_LIBRARIES})
add_custom_target(${test_bin}.run ${EXECUTABLE_OUTPUT_PATH}${test_bin} ${ARGN})
endforeach(test_src)
set(is_test TRUE)
# Build grouped tests
gtsam_add_grouped_scripts("tests" # Use subdirectory as group label
"test*.cpp" check "Test" # Standard for all tests
"${tests_local_libs}" "gtsam-static" "${tests_exclude}" # Pass in linking and exclusion lists
${is_test}) # Set all as tests
endif (GTSAM_BUILD_TESTS)
# Build timing scripts
if (GTSAM_BUILD_TIMING)
# Subdirectory target for timing - does not actually execute the scripts
add_custom_target(timing.tests)
file(GLOB timing_srcs "time*.cpp")
list(REMOVE_ITEM timing_srcs ${tests_exclude})
foreach(time_src ${timing_srcs})
get_filename_component(time_base ${time_src} NAME_WE)
set( time_bin ${time_base} )
message(STATUS "Adding Timing Benchmark ${time_bin}")
add_executable(${time_bin} ${time_src})
add_dependencies(timing.tests ${time_bin})
add_dependencies(timing ${time_bin})
target_link_libraries(${time_bin} ${convenience_libs} CppUnitLite ${Boost_LIBRARIES})
add_custom_target(${time_bin}.run ${EXECUTABLE_OUTPUT_PATH}${time_bin} ${ARGN})
endforeach(time_src)
set(is_test FALSE)
# Build grouped benchmarks
gtsam_add_grouped_scripts("tests" # Use subdirectory as group label
"time*.cpp" timing "Timing Benchmark" # Standard for all timing scripts
"${tests_local_libs}" "gtsam-static" "${tests_exclude}" # Pass in linking and exclusion lists
${is_test})
endif (GTSAM_BUILD_TIMING)

View File

@ -17,7 +17,7 @@ using namespace boost::assign;
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianSequentialSolver.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/nonlinear/GaussianISAM2.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/slam/smallExample.h>
#include <gtsam/slam/planarSLAM.h>
@ -29,7 +29,7 @@ using boost::shared_ptr;
const double tol = 1e-4;
/* ************************************************************************* */
TEST(ISAM2, AddVariables) {
TEST_UNSAFE(ISAM2, AddVariables) {
// Create initial state
Values theta;
@ -48,11 +48,31 @@ TEST(ISAM2, AddVariables) {
Permuted<VectorValues> delta(permutation, deltaUnpermuted);
VectorValues deltaNewtonUnpermuted;
deltaNewtonUnpermuted.insert(0, Vector_(3, .1, .2, .3));
deltaNewtonUnpermuted.insert(1, Vector_(2, .4, .5));
Permutation permutationNewton(2);
permutationNewton[0] = 1;
permutationNewton[1] = 0;
Permuted<VectorValues> deltaNewton(permutationNewton, deltaNewtonUnpermuted);
VectorValues deltaRgUnpermuted;
deltaRgUnpermuted.insert(0, Vector_(3, .1, .2, .3));
deltaRgUnpermuted.insert(1, Vector_(2, .4, .5));
Permutation permutationRg(2);
permutationRg[0] = 1;
permutationRg[1] = 0;
Permuted<VectorValues> deltaRg(permutationRg, deltaRgUnpermuted);
vector<bool> replacedKeys(2, false);
Ordering ordering; ordering += planarSLAM::PointKey(0), planarSLAM::PoseKey(0);
GaussianISAM2<>::Nodes nodes(2);
ISAM2::Nodes nodes(2);
// Verify initial state
LONGS_EQUAL(0, ordering[planarSLAM::PointKey(0)]);
@ -78,19 +98,47 @@ TEST(ISAM2, AddVariables) {
Permuted<VectorValues> deltaExpected(permutationExpected, deltaUnpermutedExpected);
VectorValues deltaNewtonUnpermutedExpected;
deltaNewtonUnpermutedExpected.insert(0, Vector_(3, .1, .2, .3));
deltaNewtonUnpermutedExpected.insert(1, Vector_(2, .4, .5));
deltaNewtonUnpermutedExpected.insert(2, Vector_(3, 0.0, 0.0, 0.0));
Permutation permutationNewtonExpected(3);
permutationNewtonExpected[0] = 1;
permutationNewtonExpected[1] = 0;
permutationNewtonExpected[2] = 2;
Permuted<VectorValues> deltaNewtonExpected(permutationNewtonExpected, deltaNewtonUnpermutedExpected);
VectorValues deltaRgUnpermutedExpected;
deltaRgUnpermutedExpected.insert(0, Vector_(3, .1, .2, .3));
deltaRgUnpermutedExpected.insert(1, Vector_(2, .4, .5));
deltaRgUnpermutedExpected.insert(2, Vector_(3, 0.0, 0.0, 0.0));
Permutation permutationRgExpected(3);
permutationRgExpected[0] = 1;
permutationRgExpected[1] = 0;
permutationRgExpected[2] = 2;
Permuted<VectorValues> deltaRgExpected(permutationRgExpected, deltaRgUnpermutedExpected);
vector<bool> replacedKeysExpected(3, false);
Ordering orderingExpected; orderingExpected += planarSLAM::PointKey(0), planarSLAM::PoseKey(0), planarSLAM::PoseKey(1);
GaussianISAM2<>::Nodes nodesExpected(
3, GaussianISAM2<>::sharedClique());
ISAM2::Nodes nodesExpected(
3, ISAM2::sharedClique());
// Expand initial state
GaussianISAM2<>::Impl::AddVariables(newTheta, theta, delta, replacedKeys, ordering, nodes);
ISAM2::Impl::AddVariables(newTheta, theta, delta, deltaNewton, deltaRg, replacedKeys, ordering, nodes);
EXPECT(assert_equal(thetaExpected, theta));
EXPECT(assert_equal(deltaUnpermutedExpected, deltaUnpermuted));
EXPECT(assert_equal(deltaExpected.permutation(), delta.permutation()));
EXPECT(assert_equal(deltaNewtonUnpermutedExpected, deltaNewtonUnpermuted));
EXPECT(assert_equal(deltaNewtonExpected.permutation(), deltaNewton.permutation()));
EXPECT(assert_equal(deltaRgUnpermutedExpected, deltaRgUnpermuted));
EXPECT(assert_equal(deltaRgExpected.permutation(), deltaRg.permutation()));
EXPECT(assert_container_equality(replacedKeysExpected, replacedKeys));
EXPECT(assert_equal(orderingExpected, ordering));
}
@ -171,10 +219,10 @@ TEST(ISAM2, optimize2) {
conditional->solveInPlace(expected);
// Clique
GaussianISAM2<>::sharedClique clique(
GaussianISAM2<>::Clique::Create(make_pair(conditional,GaussianFactor::shared_ptr())));
ISAM2::sharedClique clique(
ISAM2::Clique::Create(make_pair(conditional,GaussianFactor::shared_ptr())));
VectorValues actual(theta.dims(ordering));
internal::optimizeInPlace(clique, actual);
internal::optimizeInPlace<ISAM2::Base>(clique, actual);
// expected.print("expected: ");
// actual.print("actual: ");
@ -182,7 +230,7 @@ TEST(ISAM2, optimize2) {
}
/* ************************************************************************* */
bool isam_check(const planarSLAM::Graph& fullgraph, const Values& fullinit, const GaussianISAM2<>& isam) {
bool isam_check(const planarSLAM::Graph& fullgraph, const Values& fullinit, const ISAM2& isam) {
Values actual = isam.calculateEstimate();
Ordering ordering = isam.getOrdering(); // *fullgraph.orderingCOLAMD(fullinit).first;
GaussianFactorGraph linearized = *fullgraph.linearize(fullinit, ordering);
@ -212,7 +260,7 @@ TEST(ISAM2, slamlike_solution_gaussnewton)
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
GaussianISAM2<> isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
Values fullinit;
planarSLAM::Graph fullgraph;
@ -300,7 +348,7 @@ TEST(ISAM2, slamlike_solution_gaussnewton)
CHECK(isam_check(fullgraph, fullinit, isam));
// Check gradient at each node
typedef GaussianISAM2<>::sharedClique sharedClique;
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;
@ -345,7 +393,7 @@ TEST(ISAM2, slamlike_solution_dogleg)
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
GaussianISAM2<> isam(ISAM2Params(ISAM2DoglegParams(1.0), 0.0, 0, false));
ISAM2 isam(ISAM2Params(ISAM2DoglegParams(1.0), 0.0, 0, false));
Values fullinit;
planarSLAM::Graph fullgraph;
@ -433,7 +481,273 @@ TEST(ISAM2, slamlike_solution_dogleg)
CHECK(isam_check(fullgraph, fullinit, isam));
// Check gradient at each node
typedef GaussianISAM2<>::sharedClique sharedClique;
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;
jfg.push_back(JacobianFactor::shared_ptr(new JacobianFactor(*clique->conditional())));
VectorValues expectedGradient(*allocateVectorValues(isam));
gradientAtZero(jfg, expectedGradient);
// Compare with actual gradients
int variablePosition = 0;
for(GaussianConditional::const_iterator jit = clique->conditional()->begin(); jit != clique->conditional()->end(); ++jit) {
const int dim = clique->conditional()->dim(jit);
Vector actual = clique->gradientContribution().segment(variablePosition, dim);
EXPECT(assert_equal(expectedGradient[*jit], actual));
variablePosition += dim;
}
LONGS_EQUAL(clique->gradientContribution().rows(), variablePosition);
}
// Check gradient
VectorValues expectedGradient(*allocateVectorValues(isam));
gradientAtZero(FactorGraph<JacobianFactor>(isam), expectedGradient);
VectorValues expectedGradient2(gradient(FactorGraph<JacobianFactor>(isam), VectorValues::Zero(expectedGradient)));
VectorValues actualGradient(*allocateVectorValues(isam));
gradientAtZero(isam, actualGradient);
EXPECT(assert_equal(expectedGradient2, expectedGradient));
EXPECT(assert_equal(expectedGradient, actualGradient));
}
/* ************************************************************************* */
TEST(ISAM2, slamlike_solution_gaussnewton_qr)
{
// SETDEBUG("ISAM2 update", true);
// SETDEBUG("ISAM2 update verbose", true);
// SETDEBUG("ISAM2 recalculate", true);
// Pose and landmark key types from planarSLAM
using planarSLAM::PoseKey;
using planarSLAM::PointKey;
// Set up parameters
SharedDiagonal odoNoise = sharedSigmas(Vector_(3, 0.1, 0.1, M_PI/100.0));
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false, false, ISAM2Params::QR));
Values fullinit;
planarSLAM::Graph fullgraph;
// i keeps track of the time step
size_t i = 0;
// Add a prior at time 0 and update isam
{
planarSLAM::Graph newfactors;
newfactors.addPrior(0, Pose2(0.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(0), Pose2(0.01, 0.01, 0.01));
fullinit.insert(PoseKey(0), Pose2(0.01, 0.01, 0.01));
isam.update(newfactors, init);
}
CHECK(isam_check(fullgraph, fullinit, isam));
// Add odometry from time 0 to time 5
for( ; i<5; ++i) {
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
isam.update(newfactors, init);
}
// Add odometry from time 5 to 6 and landmark measurement at time 5
{
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
newfactors.addBearingRange(i, 0, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise);
newfactors.addBearingRange(i, 1, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(1.01, 0.01, 0.01));
init.insert(PointKey(0), Point2(5.0/sqrt(2.0), 5.0/sqrt(2.0)));
init.insert(PointKey(1), Point2(5.0/sqrt(2.0), -5.0/sqrt(2.0)));
fullinit.insert(PoseKey(i+1), Pose2(1.01, 0.01, 0.01));
fullinit.insert(PointKey(0), Point2(5.0/sqrt(2.0), 5.0/sqrt(2.0)));
fullinit.insert(PointKey(1), Point2(5.0/sqrt(2.0), -5.0/sqrt(2.0)));
isam.update(newfactors, init);
++ i;
}
// Add odometry from time 6 to time 10
for( ; i<10; ++i) {
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
isam.update(newfactors, init);
}
// Add odometry from time 10 to 11 and landmark measurement at time 10
{
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
newfactors.addBearingRange(i, 0, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise);
newfactors.addBearingRange(i, 1, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(6.9, 0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(6.9, 0.1, 0.01));
isam.update(newfactors, init);
++ i;
}
// Compare solutions
CHECK(isam_check(fullgraph, fullinit, isam));
// Check gradient at each node
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;
jfg.push_back(JacobianFactor::shared_ptr(new JacobianFactor(*clique->conditional())));
VectorValues expectedGradient(*allocateVectorValues(isam));
gradientAtZero(jfg, expectedGradient);
// Compare with actual gradients
int variablePosition = 0;
for(GaussianConditional::const_iterator jit = clique->conditional()->begin(); jit != clique->conditional()->end(); ++jit) {
const int dim = clique->conditional()->dim(jit);
Vector actual = clique->gradientContribution().segment(variablePosition, dim);
EXPECT(assert_equal(expectedGradient[*jit], actual));
variablePosition += dim;
}
LONGS_EQUAL(clique->gradientContribution().rows(), variablePosition);
}
// Check gradient
VectorValues expectedGradient(*allocateVectorValues(isam));
gradientAtZero(FactorGraph<JacobianFactor>(isam), expectedGradient);
VectorValues expectedGradient2(gradient(FactorGraph<JacobianFactor>(isam), VectorValues::Zero(expectedGradient)));
VectorValues actualGradient(*allocateVectorValues(isam));
gradientAtZero(isam, actualGradient);
EXPECT(assert_equal(expectedGradient2, expectedGradient));
EXPECT(assert_equal(expectedGradient, actualGradient));
}
/* ************************************************************************* */
TEST(ISAM2, slamlike_solution_dogleg_qr)
{
// SETDEBUG("ISAM2 update", true);
// SETDEBUG("ISAM2 update verbose", true);
// SETDEBUG("ISAM2 recalculate", true);
// Pose and landmark key types from planarSLAM
using planarSLAM::PoseKey;
using planarSLAM::PointKey;
// Set up parameters
SharedDiagonal odoNoise = sharedSigmas(Vector_(3, 0.1, 0.1, M_PI/100.0));
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
ISAM2 isam(ISAM2Params(ISAM2DoglegParams(1.0), 0.0, 0, false, false, ISAM2Params::QR));
Values fullinit;
planarSLAM::Graph fullgraph;
// i keeps track of the time step
size_t i = 0;
// Add a prior at time 0 and update isam
{
planarSLAM::Graph newfactors;
newfactors.addPrior(0, Pose2(0.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(0), Pose2(0.01, 0.01, 0.01));
fullinit.insert(PoseKey(0), Pose2(0.01, 0.01, 0.01));
isam.update(newfactors, init);
}
CHECK(isam_check(fullgraph, fullinit, isam));
// Add odometry from time 0 to time 5
for( ; i<5; ++i) {
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
isam.update(newfactors, init);
}
// Add odometry from time 5 to 6 and landmark measurement at time 5
{
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
newfactors.addBearingRange(i, 0, Rot2::fromAngle(M_PI/4.0), 5.0, brNoise);
newfactors.addBearingRange(i, 1, Rot2::fromAngle(-M_PI/4.0), 5.0, brNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(1.01, 0.01, 0.01));
init.insert(PointKey(0), Point2(5.0/sqrt(2.0), 5.0/sqrt(2.0)));
init.insert(PointKey(1), Point2(5.0/sqrt(2.0), -5.0/sqrt(2.0)));
fullinit.insert(PoseKey(i+1), Pose2(1.01, 0.01, 0.01));
fullinit.insert(PointKey(0), Point2(5.0/sqrt(2.0), 5.0/sqrt(2.0)));
fullinit.insert(PointKey(1), Point2(5.0/sqrt(2.0), -5.0/sqrt(2.0)));
isam.update(newfactors, init);
++ i;
}
// Add odometry from time 6 to time 10
for( ; i<10; ++i) {
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(double(i+1)+0.1, -0.1, 0.01));
isam.update(newfactors, init);
}
// Add odometry from time 10 to 11 and landmark measurement at time 10
{
planarSLAM::Graph newfactors;
newfactors.addOdometry(i, i+1, Pose2(1.0, 0.0, 0.0), odoNoise);
newfactors.addBearingRange(i, 0, Rot2::fromAngle(M_PI/4.0 + M_PI/16.0), 4.5, brNoise);
newfactors.addBearingRange(i, 1, Rot2::fromAngle(-M_PI/4.0 + M_PI/16.0), 4.5, brNoise);
fullgraph.push_back(newfactors);
Values init;
init.insert(PoseKey(i+1), Pose2(6.9, 0.1, 0.01));
fullinit.insert(PoseKey(i+1), Pose2(6.9, 0.1, 0.01));
isam.update(newfactors, init);
++ i;
}
// Compare solutions
CHECK(isam_check(fullgraph, fullinit, isam));
// Check gradient at each node
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;
@ -473,7 +787,7 @@ TEST(ISAM2, clone) {
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
GaussianISAM2<> isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false, true));
ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false, true));
Values fullinit;
planarSLAM::Graph fullgraph;
@ -558,8 +872,8 @@ TEST(ISAM2, clone) {
}
// CLONING...
boost::shared_ptr<GaussianISAM2<> > isam2
= boost::shared_ptr<GaussianISAM2<> >(new GaussianISAM2<>());
boost::shared_ptr<ISAM2 > isam2
= boost::shared_ptr<ISAM2 >(new ISAM2());
isam.cloneTo(isam2);
CHECK(assert_equal(isam, *isam2));
@ -567,24 +881,23 @@ TEST(ISAM2, clone) {
/* ************************************************************************* */
TEST(ISAM2, permute_cached) {
typedef ISAM2Clique<GaussianConditional> Clique;
typedef boost::shared_ptr<ISAM2Clique<GaussianConditional> > sharedClique;
typedef boost::shared_ptr<ISAM2Clique> sharedISAM2Clique;
// Construct expected permuted BayesTree (variable 2 has been changed to 1)
BayesTree<GaussianConditional, Clique> expected;
expected.insert(sharedClique(new Clique(make_pair(
BayesTree<GaussianConditional, ISAM2Clique> expected;
expected.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(3, Matrix_(1,1,1.0))
(4, Matrix_(1,1,2.0)),
2, Vector_(1,1.0), Vector_(1,1.0)), // p(3,4)
HessianFactor::shared_ptr())))); // Cached: empty
expected.insert(sharedClique(new Clique(make_pair(
expected.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(2, Matrix_(1,1,1.0))
(3, Matrix_(1,1,2.0)),
1, Vector_(1,1.0), Vector_(1,1.0)), // p(2|3)
boost::make_shared<HessianFactor>(3, Matrix_(1,1,1.0), Vector_(1,1.0), 0.0))))); // Cached: p(3)
expected.insert(sharedClique(new Clique(make_pair(
expected.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(0, Matrix_(1,1,1.0))
(2, Matrix_(1,1,2.0)),
@ -595,20 +908,20 @@ TEST(ISAM2, permute_cached) {
expected.root()->children().front()->children().front()->conditional()->keys()[1] = 1;
// Construct unpermuted BayesTree
BayesTree<GaussianConditional, Clique> actual;
actual.insert(sharedClique(new Clique(make_pair(
BayesTree<GaussianConditional, ISAM2Clique> actual;
actual.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(3, Matrix_(1,1,1.0))
(4, Matrix_(1,1,2.0)),
2, Vector_(1,1.0), Vector_(1,1.0)), // p(3,4)
HessianFactor::shared_ptr())))); // Cached: empty
actual.insert(sharedClique(new Clique(make_pair(
actual.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(2, Matrix_(1,1,1.0))
(3, Matrix_(1,1,2.0)),
1, Vector_(1,1.0), Vector_(1,1.0)), // p(2|3)
boost::make_shared<HessianFactor>(3, Matrix_(1,1,1.0), Vector_(1,1.0), 0.0))))); // Cached: p(3)
actual.insert(sharedClique(new Clique(make_pair(
actual.insert(sharedISAM2Clique(new ISAM2Clique(make_pair(
boost::make_shared<GaussianConditional>(pair_list_of
(0, Matrix_(1,1,1.0))
(2, Matrix_(1,1,2.0)),
@ -646,7 +959,7 @@ TEST(ISAM2, removeFactors)
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
GaussianISAM2<> isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
Values fullinit;
planarSLAM::Graph fullgraph;
@ -740,7 +1053,7 @@ TEST(ISAM2, removeFactors)
CHECK(isam_check(fullgraph, fullinit, isam));
// Check gradient at each node
typedef GaussianISAM2<>::sharedClique sharedClique;
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;
@ -785,12 +1098,14 @@ TEST(ISAM2, constrained_ordering)
SharedDiagonal brNoise = sharedSigmas(Vector_(2, M_PI/100.0, 0.1));
// These variables will be reused and accumulate factors and values
GaussianISAM2<> isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
ISAM2 isam(ISAM2Params(ISAM2GaussNewtonParams(0.001), 0.0, 0, false));
Values fullinit;
planarSLAM::Graph fullgraph;
// We will constrain x3 and x4 to the end
FastSet<Key> constrained; constrained.insert(planarSLAM::PoseKey(3)); constrained.insert(planarSLAM::PoseKey(4));
FastMap<Key, int> constrained;
constrained.insert(make_pair(planarSLAM::PoseKey(3), 1));
constrained.insert(make_pair(planarSLAM::PoseKey(4), 2));
// i keeps track of the time step
size_t i = 0;
@ -879,11 +1194,10 @@ TEST(ISAM2, constrained_ordering)
EXPECT(isam_check(fullgraph, fullinit, isam));
// Check that x3 and x4 are last, but either can come before the other
EXPECT((isam.getOrdering()[planarSLAM::PoseKey(3)] == 12 && isam.getOrdering()[planarSLAM::PoseKey(4)] == 13) ||
(isam.getOrdering()[planarSLAM::PoseKey(3)] == 13 && isam.getOrdering()[planarSLAM::PoseKey(4)] == 12));
EXPECT(isam.getOrdering()[planarSLAM::PoseKey(3)] == 12 && isam.getOrdering()[planarSLAM::PoseKey(4)] == 13);
// Check gradient at each node
typedef GaussianISAM2<>::sharedClique sharedClique;
typedef ISAM2::sharedClique sharedClique;
BOOST_FOREACH(const sharedClique& clique, isam.nodes()) {
// Compute expected gradient
FactorGraph<JacobianFactor> jfg;

View File

@ -17,8 +17,9 @@ install(FILES matlab.h DESTINATION ${CMAKE_INSTALL_PREFIX}/include/wrap)
# Build tests
if (GTSAM_BUILD_TESTS)
add_definitions(-DTOPSRCDIR="${CMAKE_SOURCE_DIR}")
gtsam_add_external_tests(wrap wrap_lib)
add_definitions(-DTOPSRCDIR="${CMAKE_SOURCE_DIR}")
set(wrap_local_libs wrap_lib)
gtsam_add_subdir_tests("wrap" "${wrap_local_libs}" "${wrap_local_libs}" "")
endif(GTSAM_BUILD_TESTS)
# Wrap codegen

View File

@ -35,16 +35,22 @@ void Class::matlab_proxy(const string& classFile) const {
string matlabName = qualifiedName();
// emit class proxy code
file.oss << "classdef " << matlabName << endl;
// we want our class to inherit the handle class for memory purposes
file.oss << "classdef " << matlabName << " < handle" << endl;
file.oss << " properties" << endl;
file.oss << " self = 0" << endl;
file.oss << " end" << endl;
file.oss << " methods" << endl;
// constructor
file.oss << " function obj = " << matlabName << "(varargin)" << endl;
BOOST_FOREACH(Constructor c, constructors)
c.matlab_proxy_fragment(file,matlabName);
file.oss << " if nargin ~= 13 && obj.self == 0, error('" << matlabName << " constructor failed'); end" << endl;
file.oss << " end" << endl;
// deconstructor
file.oss << " function delete(obj)" << endl;
file.oss << " delete_" << matlabName << "(obj);" << endl;
file.oss << " end" << endl;
file.oss << " function display(obj), obj.print(''); end" << endl;
file.oss << " function disp(obj), obj.display; end" << endl;
file.oss << " end" << endl;
@ -62,6 +68,11 @@ void Class::matlab_constructors(const string& toolboxPath, const vector<string>&
}
}
/* ************************************************************************* */
void Class::matlab_deconstructor(const string& toolboxPath, const vector<string>& using_namespaces) const {
d.matlab_mfile (toolboxPath, qualifiedName());
d.matlab_wrapper(toolboxPath, qualifiedName("::"), qualifiedName(), using_namespaces, includes);
}
/* ************************************************************************* */
void Class::matlab_methods(const string& classPath, const vector<string>& using_namespaces) const {
string matlabName = qualifiedName(), cppName = qualifiedName("::");
@ -88,6 +99,7 @@ void Class::matlab_make_fragment(FileWriter& file,
string matlabClassName = qualifiedName();
BOOST_FOREACH(Constructor c, constructors)
file.oss << mex << c.matlab_wrapper_name(matlabClassName) << ".cpp" << endl;
file.oss << mex << d.matlab_wrapper_name(matlabClassName) << ".cpp" << endl;
BOOST_FOREACH(StaticMethod sm, static_methods)
file.oss << mex << matlabClassName + "_" + sm.name << ".cpp" << endl;
file.oss << endl << "cd @" << matlabClassName << endl;
@ -119,6 +131,7 @@ void Class::makefile_fragment(FileWriter& file) const {
string file_base = c.matlab_wrapper_name(matlabName);
file_names.push_back(file_base);
}
file_names.push_back(d.matlab_wrapper_name(matlabName));
BOOST_FOREACH(StaticMethod c, static_methods) {
string file_base = matlabName + "_" + c.name;
file_names.push_back(file_base);

View File

@ -20,6 +20,7 @@
#include <string>
#include "Constructor.h"
#include "Deconstructor.h"
#include "Method.h"
#include "StaticMethod.h"
@ -37,12 +38,15 @@ struct Class {
std::vector<StaticMethod> static_methods; ///< Static methods
std::vector<std::string> namespaces; ///< Stack of namespaces
std::vector<std::string> includes; ///< header include overrides
Deconstructor d;
bool verbose_; ///< verbose flag
// And finally MATLAB code is emitted, methods below called by Module::matlab_code
void matlab_proxy(const std::string& classFile) const; ///< emit proxy class
void matlab_constructors(const std::string& toolboxPath,
const std::vector<std::string>& using_namespaces) const; ///< emit constructor wrappers
void matlab_deconstructor(const std::string& toolboxPath,
const std::vector<std::string>& using_namespaces) const;
void matlab_methods(const std::string& classPath,
const std::vector<std::string>& using_namespaces) const; ///< emit method wrappers
void matlab_static_methods(const std::string& classPath,

83
wrap/Deconstructor.cpp Normal file
View File

@ -0,0 +1,83 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Deconstructor.cpp
* @author Frank Dellaert
* @author Andrew Melim
**/
#include <iostream>
#include <fstream>
#include <boost/foreach.hpp>
#include "utilities.h"
#include "Deconstructor.h"
using namespace std;
using namespace wrap;
/* ************************************************************************* */
string Deconstructor::matlab_wrapper_name(const string& className) const {
string str = "delete_" + className;
return str;
}
/* ************************************************************************* */
void Deconstructor::matlab_mfile(const string& toolboxPath, const string& qualifiedMatlabName) const {
string matlabName = matlab_wrapper_name(qualifiedMatlabName);
// open destination m-file
string wrapperFile = toolboxPath + "/" + matlabName + ".m";
FileWriter file(wrapperFile, verbose_, "%");
// generate code
file.oss << "function result = " << matlabName << "(obj";
if (args.size()) file.oss << "," << args.names();
file.oss << ")" << endl;
file.oss << " error('need to compile " << matlabName << ".cpp');" << endl;
file.oss << "end" << endl;
// close file
file.emit(true);
}
/* ************************************************************************* */
void Deconstructor::matlab_wrapper(const string& toolboxPath,
const string& cppClassName,
const string& matlabClassName,
const vector<string>& using_namespaces, const vector<string>& includes) const {
string matlabName = matlab_wrapper_name(matlabClassName);
// open destination wrapperFile
string wrapperFile = toolboxPath + "/" + matlabName + ".cpp";
FileWriter file(wrapperFile, verbose_, "//");
// generate code
//
generateIncludes(file, name, includes);
cout << "Generate includes " << name << endl;
generateUsingNamespace(file, using_namespaces);
file.oss << "void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])" << endl;
file.oss << "{" << endl;
//Deconstructor takes 1 arg, the mxArray obj
file.oss << " checkArguments(\"" << matlabName << "\",nargout,nargin," << "1" << ");" << endl;
file.oss << " delete_shared_ptr< " << cppClassName << " >(in[0],\"" << matlabClassName << "\");" << endl;
file.oss << "}" << endl;
// close file
file.emit(true);
}
/* ************************************************************************* */
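For a hypothetical wrapped class Point2 (no namespace), the generator above would emit roughly the following delete_Point2.cpp, reconstructed from the file.oss writes; the include and using-namespace lines produced by generateIncludes/generateUsingNamespace are omitted, and checkArguments/delete_shared_ptr are assumed to be provided by wrap's MATLAB support header:

  // delete_Point2.cpp -- illustrative output of Deconstructor::matlab_wrapper
  void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
  {
    checkArguments("delete_Point2",nargout,nargin,1);
    delete_shared_ptr< Point2 >(in[0],"Point2");
  }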

61
wrap/Deconstructor.h Normal file
View File

@ -0,0 +1,61 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file Deconstructor.h
* @brief class describing a deconstructor + code generation
* @author Frank Dellaert
* @author Andrew Melim
**/
#pragma once
#include <string>
#include <vector>
#include "Argument.h"
namespace wrap {
// Deconstructor class
struct Deconstructor {
/// Deconstructor creates an empty class
Deconstructor(bool verbose = true) :
verbose_(verbose) {
}
// Then the instance variables are set directly by the Module parser
ArgumentList args;
std::string name;
bool verbose_;
// MATLAB code generation
// toolboxPath is main toolbox directory, e.g., ../matlab
// classFile is class proxy file, e.g., ../matlab/@Point2/Point2.m
/// wrapper name
std::string matlab_wrapper_name(const std::string& className) const;
/// m-file
void matlab_mfile(const std::string& toolboxPath,
const std::string& qualifiedMatlabName) const;
/// cpp wrapper
void matlab_wrapper(const std::string& toolboxPath,
const std::string& cppClassName,
const std::string& matlabClassName,
const std::vector<std::string>& using_namespaces,
const std::vector<std::string>& includes) const;
};
} // \namespace wrap

View File

@ -12,6 +12,8 @@
/**
* @file Module.cpp
* @author Frank Dellaert
* @author Alex Cunningham
* @author Andrew Melim
**/
#include "Module.h"
@ -49,6 +51,7 @@ Module::Module(const string& interfacePath,
Argument arg0, arg;
ArgumentList args0, args;
Constructor constructor0(enable_verbose), constructor(enable_verbose);
Deconstructor deconstructor0(enable_verbose), deconstructor(enable_verbose);
Method method0(enable_verbose), method(enable_verbose);
StaticMethod static_method0(enable_verbose), static_method(enable_verbose);
Class cls0(enable_verbose),cls(enable_verbose);
@ -180,7 +183,10 @@ Module::Module(const string& interfacePath,
>> str_p("};"))
[assign_a(cls.namespaces, namespaces)]
[append_a(cls.includes, namespace_includes)]
[assign_a(deconstructor.name,cls.name)]
[assign_a(cls.d, deconstructor)]
[push_back_a(classes,cls)]
[assign_a(deconstructor,deconstructor0)]
[assign_a(cls,cls0)];
Rule namespace_def_p =
@ -339,6 +345,9 @@ void Module::matlab_code(const string& toolboxPath,
cls.matlab_static_methods(toolboxPath,using_namespaces);
cls.matlab_methods(classPath,using_namespaces);
// create deconstructor
cls.matlab_deconstructor(toolboxPath,using_namespaces);
// add lines to make m-file
makeModuleMfile.oss << "%% " << cls.qualifiedName() << endl;
makeModuleMfile.oss << "cd(toolboxpath)" << endl;

View File

@ -281,8 +281,6 @@ gtsam::Matrix unwrap< gtsam::Matrix >(const mxArray* array) {
// inspired by mexhandle, but using shared_ptr
//*****************************************************************************
template<typename T> class Collector;
template<typename T>
class ObjectHandle {
private:
@ -296,13 +294,12 @@ public:
ObjectHandle(T* ptr) :
type(&typeid(T)), t(shared_ptr<T> (ptr)) {
signature = this;
Collector<T>::register_handle(this);
}
// Constructor for shared pointers
// Creates shared pointer, will delete if is last one to hold pointer
ObjectHandle(shared_ptr<T> ptr) :
type(&typeid(T)), t(ptr) {
/*type(&typeid(T)),*/ t(ptr) {
signature = this;
}
@ -364,42 +361,6 @@ public:
return obj;
}
friend class Collector<T> ; // allow Collector access to signature
};
// ---------------------------------------------------------
// ------------------ Garbage Collection -------------------
// ---------------------------------------------------------
// Garbage collection singleton (one collector object for each type T).
// Ensures that registered handles are deleted when the dll is released (they
// may also be deleted previously without problem).
// The Collector provides protection against resource leaks in the case
// where 'clear all' is called in MatLab. (This is because MatLab will call
// the destructors of statically allocated objects but not free-store allocated
// objects.)
template <typename T>
class Collector {
typedef ObjectHandle<T> Handle;
typedef std::list< Handle* > ObjList;
typedef typename ObjList::iterator iterator;
ObjList objlist;
public:
~Collector() {
for (iterator i= objlist.begin(); i!=objlist.end(); ++i) {
if ((*i)->signature == *i) // check for valid signature
delete *i;
}
}
static void register_handle (Handle* obj) {
static Collector singleton;
singleton.objlist.push_back(obj);
}
private: // prevent construction
Collector() {}
Collector(const Collector&);
};
//*****************************************************************************
@ -462,6 +423,7 @@ mxArray* wrap_shared_ptr(shared_ptr< Class > shared_ptr, const char *classname)
*/
template <typename Class>
shared_ptr<Class> unwrap_shared_ptr(const mxArray* obj, const string& className) {
//Why is this here?
#ifndef UNSAFE_WRAP
bool isClass = mxIsClass(obj, className.c_str());
if (!isClass) {
@ -475,4 +437,20 @@ shared_ptr<Class> unwrap_shared_ptr(const mxArray* obj, const string& className)
return handle->get_object();
}
template <typename Class>
void delete_shared_ptr(const mxArray* obj, const string& className) {
//Why is this here?
#ifndef UNSAFE_WRAP
bool isClass = true;//mxIsClass(obj, className.c_str());
if (!isClass) {
mexPrintf("Expected %s, got %s\n", className.c_str(), mxGetClassName(obj));
error("Argument has wrong type.");
}
#endif
mxArray* mxh = mxGetProperty(obj,0,"self");
if (mxh==NULL) error("delete_shared_ptr: invalid wrap object");
ObjectHandle<Class>* handle = ObjectHandle<Class>::from_mex_handle(mxh);
delete handle;
}
//*****************************************************************************
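To make the handle lifecycle concrete, the sketch below shows how wrap_shared_ptr and the new delete_shared_ptr fit together in a single hand-written MEX file. This is an illustration only, assuming the templates above and an example class geometry::Point2; the generated code actually splits these steps across the new_Point2_*, method, and delete_Point2 wrappers.
// Illustrative sketch only: combines creation and deletion in one MEX file.
// Assumes wrap/matlab.h as modified above and an example class geometry::Point2.
#include <wrap/matlab.h>
#include <Point2.h>
using namespace geometry;
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
  if (nargin == 0) {
    // create: allocate the object and return the raw handle that the
    // MATLAB proxy stores in its 'self' property
    shared_ptr<Point2> p(new Point2());
    out[0] = wrap_shared_ptr(p, "Point2");
  } else {
    // delete: in[0] is the MATLAB proxy object; delete_shared_ptr reads its
    // 'self' property, recovers the ObjectHandle, and deletes it
    checkArguments("example_Point2", nargout, nargin, 1);
    delete_shared_ptr< Point2 >(in[0], "Point2");
  }
}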

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef Point2
classdef Point2 < handle
properties
self = 0
end
@ -9,6 +9,9 @@ classdef Point2
if (nargin == 2 && isa(varargin{1},'double') && isa(varargin{2},'double')), obj.self = new_Point2_dd(varargin{1},varargin{2}); end
if nargin ~= 13 && obj.self == 0, error('Point2 constructor failed'); end
end
function delete(obj)
delete_Point2(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef Point3
classdef Point3 < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef Point3
if (nargin == 3 && isa(varargin{1},'double') && isa(varargin{2},'double') && isa(varargin{3},'double')), obj.self = new_Point3_ddd(varargin{1},varargin{2},varargin{3}); end
if nargin ~= 13 && obj.self == 0, error('Point3 constructor failed'); end
end
function delete(obj)
delete_Point3(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef Test
classdef Test < handle
properties
self = 0
end
@ -9,6 +9,9 @@ classdef Test
if (nargin == 2 && isa(varargin{1},'double') && isa(varargin{2},'double')), obj.self = new_Test_dM(varargin{1},varargin{2}); end
if nargin ~= 13 && obj.self == 0, error('Test constructor failed'); end
end
function delete(obj)
delete_Test(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -11,6 +11,8 @@ new_Point2_.$(MEXENDING): new_Point2_.cpp
$(MEX) $(mex_flags) new_Point2_.cpp -output new_Point2_
new_Point2_dd.$(MEXENDING): new_Point2_dd.cpp
$(MEX) $(mex_flags) new_Point2_dd.cpp -output new_Point2_dd
delete_Point2.$(MEXENDING): delete_Point2.cpp
$(MEX) $(mex_flags) delete_Point2.cpp -output delete_Point2
@Point2/x.$(MEXENDING): @Point2/x.cpp
$(MEX) $(mex_flags) @Point2/x.cpp -output @Point2/x
@Point2/y.$(MEXENDING): @Point2/y.cpp
@ -24,11 +26,13 @@ new_Point2_dd.$(MEXENDING): new_Point2_dd.cpp
@Point2/vectorConfusion.$(MEXENDING): @Point2/vectorConfusion.cpp
$(MEX) $(mex_flags) @Point2/vectorConfusion.cpp -output @Point2/vectorConfusion
Point2: new_Point2_.$(MEXENDING) new_Point2_dd.$(MEXENDING) @Point2/x.$(MEXENDING) @Point2/y.$(MEXENDING) @Point2/dim.$(MEXENDING) @Point2/returnChar.$(MEXENDING) @Point2/argChar.$(MEXENDING) @Point2/vectorConfusion.$(MEXENDING)
Point2: new_Point2_.$(MEXENDING) new_Point2_dd.$(MEXENDING) delete_Point2.$(MEXENDING) @Point2/x.$(MEXENDING) @Point2/y.$(MEXENDING) @Point2/dim.$(MEXENDING) @Point2/returnChar.$(MEXENDING) @Point2/argChar.$(MEXENDING) @Point2/vectorConfusion.$(MEXENDING)
# Point3
new_Point3_ddd.$(MEXENDING): new_Point3_ddd.cpp
$(MEX) $(mex_flags) new_Point3_ddd.cpp -output new_Point3_ddd
delete_Point3.$(MEXENDING): delete_Point3.cpp
$(MEX) $(mex_flags) delete_Point3.cpp -output delete_Point3
Point3_staticFunction.$(MEXENDING): Point3_staticFunction.cpp
$(MEX) $(mex_flags) Point3_staticFunction.cpp -output Point3_staticFunction
Point3_StaticFunctionRet.$(MEXENDING): Point3_StaticFunctionRet.cpp
@ -36,13 +40,15 @@ Point3_StaticFunctionRet.$(MEXENDING): Point3_StaticFunctionRet.cpp
@Point3/norm.$(MEXENDING): @Point3/norm.cpp
$(MEX) $(mex_flags) @Point3/norm.cpp -output @Point3/norm
Point3: new_Point3_ddd.$(MEXENDING) Point3_staticFunction.$(MEXENDING) Point3_StaticFunctionRet.$(MEXENDING) @Point3/norm.$(MEXENDING)
Point3: new_Point3_ddd.$(MEXENDING) delete_Point3.$(MEXENDING) Point3_staticFunction.$(MEXENDING) Point3_StaticFunctionRet.$(MEXENDING) @Point3/norm.$(MEXENDING)
# Test
new_Test_.$(MEXENDING): new_Test_.cpp
$(MEX) $(mex_flags) new_Test_.cpp -output new_Test_
new_Test_dM.$(MEXENDING): new_Test_dM.cpp
$(MEX) $(mex_flags) new_Test_dM.cpp -output new_Test_dM
delete_Test.$(MEXENDING): delete_Test.cpp
$(MEX) $(mex_flags) delete_Test.cpp -output delete_Test
@Test/return_pair.$(MEXENDING): @Test/return_pair.cpp
$(MEX) $(mex_flags) @Test/return_pair.cpp -output @Test/return_pair
@Test/return_bool.$(MEXENDING): @Test/return_bool.cpp
@ -82,7 +88,7 @@ new_Test_dM.$(MEXENDING): new_Test_dM.cpp
@Test/print.$(MEXENDING): @Test/print.cpp
$(MEX) $(mex_flags) @Test/print.cpp -output @Test/print
Test: new_Test_.$(MEXENDING) new_Test_dM.$(MEXENDING) @Test/return_pair.$(MEXENDING) @Test/return_bool.$(MEXENDING) @Test/return_size_t.$(MEXENDING) @Test/return_int.$(MEXENDING) @Test/return_double.$(MEXENDING) @Test/return_string.$(MEXENDING) @Test/return_vector1.$(MEXENDING) @Test/return_matrix1.$(MEXENDING) @Test/return_vector2.$(MEXENDING) @Test/return_matrix2.$(MEXENDING) @Test/arg_EigenConstRef.$(MEXENDING) @Test/return_field.$(MEXENDING) @Test/return_TestPtr.$(MEXENDING) @Test/return_Test.$(MEXENDING) @Test/return_Point2Ptr.$(MEXENDING) @Test/create_ptrs.$(MEXENDING) @Test/create_MixedPtrs.$(MEXENDING) @Test/return_ptrs.$(MEXENDING) @Test/print.$(MEXENDING)
Test: new_Test_.$(MEXENDING) new_Test_dM.$(MEXENDING) delete_Test.$(MEXENDING) @Test/return_pair.$(MEXENDING) @Test/return_bool.$(MEXENDING) @Test/return_size_t.$(MEXENDING) @Test/return_int.$(MEXENDING) @Test/return_double.$(MEXENDING) @Test/return_string.$(MEXENDING) @Test/return_vector1.$(MEXENDING) @Test/return_matrix1.$(MEXENDING) @Test/return_vector2.$(MEXENDING) @Test/return_matrix2.$(MEXENDING) @Test/arg_EigenConstRef.$(MEXENDING) @Test/return_field.$(MEXENDING) @Test/return_TestPtr.$(MEXENDING) @Test/return_Test.$(MEXENDING) @Test/return_Point2Ptr.$(MEXENDING) @Test/create_ptrs.$(MEXENDING) @Test/create_MixedPtrs.$(MEXENDING) @Test/return_ptrs.$(MEXENDING) @Test/print.$(MEXENDING)

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <Point2.h>
using namespace geometry;
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_Point2",nargout,nargin,1);
delete_shared_ptr< Point2 >(in[0],"Point2");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_Point2(obj)
error('need to compile delete_Point2.cpp');
end

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <Point3.h>
using namespace geometry;
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_Point3",nargout,nargin,1);
delete_shared_ptr< Point3 >(in[0],"Point3");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_Point3(obj)
error('need to compile delete_Point3.cpp');
end

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <folder/path/to/Test.h>
using namespace geometry;
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_Test",nargout,nargin,1);
delete_shared_ptr< Test >(in[0],"Test");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_Test(obj)
error('need to compile delete_Test.cpp');
end

View File

@ -11,6 +11,7 @@ addpath(toolboxpath);
cd(toolboxpath)
mex -O5 new_Point2_.cpp
mex -O5 new_Point2_dd.cpp
mex -O5 delete_Point2.cpp
cd @Point2
mex -O5 x.cpp
@ -23,6 +24,7 @@ mex -O5 vectorConfusion.cpp
%% Point3
cd(toolboxpath)
mex -O5 new_Point3_ddd.cpp
mex -O5 delete_Point3.cpp
mex -O5 Point3_staticFunction.cpp
mex -O5 Point3_StaticFunctionRet.cpp
@ -33,6 +35,7 @@ mex -O5 norm.cpp
cd(toolboxpath)
mex -O5 new_Test_.cpp
mex -O5 new_Test_dM.cpp
mex -O5 delete_Test.cpp
cd @Test
mex -O5 return_pair.cpp

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ClassD
classdef ClassD < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ClassD
if (nargin == 0), obj.self = new_ClassD_(); end
if nargin ~= 13 && obj.self == 0, error('ClassD constructor failed'); end
end
function delete(obj)
delete_ClassD(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ns1ClassA
classdef ns1ClassA < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ns1ClassA
if (nargin == 0), obj.self = new_ns1ClassA_(); end
if nargin ~= 13 && obj.self == 0, error('ns1ClassA constructor failed'); end
end
function delete(obj)
delete_ns1ClassA(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ns1ClassB
classdef ns1ClassB < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ns1ClassB
if (nargin == 0), obj.self = new_ns1ClassB_(); end
if nargin ~= 13 && obj.self == 0, error('ns1ClassB constructor failed'); end
end
function delete(obj)
delete_ns1ClassB(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ns2ClassA
classdef ns2ClassA < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ns2ClassA
if (nargin == 0), obj.self = new_ns2ClassA_(); end
if nargin ~= 13 && obj.self == 0, error('ns2ClassA constructor failed'); end
end
function delete(obj)
delete_ns2ClassA(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ns2ClassC
classdef ns2ClassC < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ns2ClassC
if (nargin == 0), obj.self = new_ns2ClassC_(); end
if nargin ~= 13 && obj.self == 0, error('ns2ClassC constructor failed'); end
end
function delete(obj)
delete_ns2ClassC(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -1,5 +1,5 @@
% automatically generated by wrap
classdef ns2ns3ClassB
classdef ns2ns3ClassB < handle
properties
self = 0
end
@ -8,6 +8,9 @@ classdef ns2ns3ClassB
if (nargin == 0), obj.self = new_ns2ns3ClassB_(); end
if nargin ~= 13 && obj.self == 0, error('ns2ns3ClassB constructor failed'); end
end
function delete(obj)
delete_ns2ns3ClassB(obj);
end
function display(obj), obj.print(''); end
function disp(obj), obj.display; end
end

View File

@ -9,18 +9,24 @@ all: ns1ClassA ns1ClassB ns2ClassA ns2ns3ClassB ns2ClassC ClassD
# ns1ClassA
new_ns1ClassA_.$(MEXENDING): new_ns1ClassA_.cpp
$(MEX) $(mex_flags) new_ns1ClassA_.cpp -output new_ns1ClassA_
delete_ns1ClassA.$(MEXENDING): delete_ns1ClassA.cpp
$(MEX) $(mex_flags) delete_ns1ClassA.cpp -output delete_ns1ClassA
ns1ClassA: new_ns1ClassA_.$(MEXENDING)
ns1ClassA: new_ns1ClassA_.$(MEXENDING) delete_ns1ClassA.$(MEXENDING)
# ns1ClassB
new_ns1ClassB_.$(MEXENDING): new_ns1ClassB_.cpp
$(MEX) $(mex_flags) new_ns1ClassB_.cpp -output new_ns1ClassB_
delete_ns1ClassB.$(MEXENDING): delete_ns1ClassB.cpp
$(MEX) $(mex_flags) delete_ns1ClassB.cpp -output delete_ns1ClassB
ns1ClassB: new_ns1ClassB_.$(MEXENDING)
ns1ClassB: new_ns1ClassB_.$(MEXENDING) delete_ns1ClassB.$(MEXENDING)
# ns2ClassA
new_ns2ClassA_.$(MEXENDING): new_ns2ClassA_.cpp
$(MEX) $(mex_flags) new_ns2ClassA_.cpp -output new_ns2ClassA_
delete_ns2ClassA.$(MEXENDING): delete_ns2ClassA.cpp
$(MEX) $(mex_flags) delete_ns2ClassA.cpp -output delete_ns2ClassA
ns2ClassA_afunction.$(MEXENDING): ns2ClassA_afunction.cpp
$(MEX) $(mex_flags) ns2ClassA_afunction.cpp -output ns2ClassA_afunction
@ns2ClassA/memberFunction.$(MEXENDING): @ns2ClassA/memberFunction.cpp
@ -30,25 +36,31 @@ ns2ClassA_afunction.$(MEXENDING): ns2ClassA_afunction.cpp
@ns2ClassA/nsReturn.$(MEXENDING): @ns2ClassA/nsReturn.cpp
$(MEX) $(mex_flags) @ns2ClassA/nsReturn.cpp -output @ns2ClassA/nsReturn
ns2ClassA: new_ns2ClassA_.$(MEXENDING) ns2ClassA_afunction.$(MEXENDING) @ns2ClassA/memberFunction.$(MEXENDING) @ns2ClassA/nsArg.$(MEXENDING) @ns2ClassA/nsReturn.$(MEXENDING)
ns2ClassA: new_ns2ClassA_.$(MEXENDING) delete_ns2ClassA.$(MEXENDING) ns2ClassA_afunction.$(MEXENDING) @ns2ClassA/memberFunction.$(MEXENDING) @ns2ClassA/nsArg.$(MEXENDING) @ns2ClassA/nsReturn.$(MEXENDING)
# ns2ns3ClassB
new_ns2ns3ClassB_.$(MEXENDING): new_ns2ns3ClassB_.cpp
$(MEX) $(mex_flags) new_ns2ns3ClassB_.cpp -output new_ns2ns3ClassB_
delete_ns2ns3ClassB.$(MEXENDING): delete_ns2ns3ClassB.cpp
$(MEX) $(mex_flags) delete_ns2ns3ClassB.cpp -output delete_ns2ns3ClassB
ns2ns3ClassB: new_ns2ns3ClassB_.$(MEXENDING)
ns2ns3ClassB: new_ns2ns3ClassB_.$(MEXENDING) delete_ns2ns3ClassB.$(MEXENDING)
# ns2ClassC
new_ns2ClassC_.$(MEXENDING): new_ns2ClassC_.cpp
$(MEX) $(mex_flags) new_ns2ClassC_.cpp -output new_ns2ClassC_
delete_ns2ClassC.$(MEXENDING): delete_ns2ClassC.cpp
$(MEX) $(mex_flags) delete_ns2ClassC.cpp -output delete_ns2ClassC
ns2ClassC: new_ns2ClassC_.$(MEXENDING)
ns2ClassC: new_ns2ClassC_.$(MEXENDING) delete_ns2ClassC.$(MEXENDING)
# ClassD
new_ClassD_.$(MEXENDING): new_ClassD_.cpp
$(MEX) $(mex_flags) new_ClassD_.cpp -output new_ClassD_
delete_ClassD.$(MEXENDING): delete_ClassD.cpp
$(MEX) $(mex_flags) delete_ClassD.cpp -output delete_ClassD
ClassD: new_ClassD_.$(MEXENDING)
ClassD: new_ClassD_.$(MEXENDING) delete_ClassD.$(MEXENDING)

View File

@ -0,0 +1,8 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <ClassD.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ClassD",nargout,nargin,1);
delete_shared_ptr< ClassD >(in[0],"ClassD");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ClassD(obj)
error('need to compile delete_ClassD.cpp');
end

View File

@ -0,0 +1,8 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <path/to/ns1.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ns1ClassA",nargout,nargin,1);
delete_shared_ptr< ns1::ClassA >(in[0],"ns1ClassA");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ns1ClassA(obj)
error('need to compile delete_ns1ClassA.cpp');
end

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <path/to/ns1.h>
#include <path/to/ns1/ClassB.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ns1ClassB",nargout,nargin,1);
delete_shared_ptr< ns1::ClassB >(in[0],"ns1ClassB");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ns1ClassB(obj)
error('need to compile delete_ns1ClassB.cpp');
end

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <path/to/ns2.h>
#include <path/to/ns2/ClassA.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ns2ClassA",nargout,nargin,1);
delete_shared_ptr< ns2::ClassA >(in[0],"ns2ClassA");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ns2ClassA(obj)
error('need to compile delete_ns2ClassA.cpp');
end

View File

@ -0,0 +1,8 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <path/to/ns2.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ns2ClassC",nargout,nargin,1);
delete_shared_ptr< ns2::ClassC >(in[0],"ns2ClassC");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ns2ClassC(obj)
error('need to compile delete_ns2ClassC.cpp');
end

View File

@ -0,0 +1,9 @@
// automatically generated by wrap
#include <wrap/matlab.h>
#include <path/to/ns2.h>
#include <path/to/ns3.h>
void mexFunction(int nargout, mxArray *out[], int nargin, const mxArray *in[])
{
checkArguments("delete_ns2ns3ClassB",nargout,nargin,1);
delete_shared_ptr< ns2::ns3::ClassB >(in[0],"ns2ns3ClassB");
}

View File

@ -0,0 +1,4 @@
% automatically generated by wrap
function result = delete_ns2ns3ClassB(obj)
error('need to compile delete_ns2ns3ClassB.cpp');
end

View File

@ -10,18 +10,21 @@ addpath(toolboxpath);
%% ns1ClassA
cd(toolboxpath)
mex -O5 new_ns1ClassA_.cpp
mex -O5 delete_ns1ClassA.cpp
cd @ns1ClassA
%% ns1ClassB
cd(toolboxpath)
mex -O5 new_ns1ClassB_.cpp
mex -O5 delete_ns1ClassB.cpp
cd @ns1ClassB
%% ns2ClassA
cd(toolboxpath)
mex -O5 new_ns2ClassA_.cpp
mex -O5 delete_ns2ClassA.cpp
mex -O5 ns2ClassA_afunction.cpp
cd @ns2ClassA
@ -32,18 +35,21 @@ mex -O5 nsReturn.cpp
%% ns2ns3ClassB
cd(toolboxpath)
mex -O5 new_ns2ns3ClassB_.cpp
mex -O5 delete_ns2ns3ClassB.cpp
cd @ns2ns3ClassB
%% ns2ClassC
cd(toolboxpath)
mex -O5 new_ns2ClassC_.cpp
mex -O5 delete_ns2ClassC.cpp
cd @ns2ClassC
%% ClassD
cd(toolboxpath)
mex -O5 new_ClassD_.cpp
mex -O5 delete_ClassD.cpp
cd @ClassD

7
wrap/tests/testMemory.m Normal file
View File

@ -0,0 +1,7 @@
% MATLAB testing file for memory allocation and leaks
% Andrew Melim
addpath([pwd,'/../../../toolbox/gtsam']);
for i=1:100000
p = gtsamPoint2();
end

View File

@ -226,6 +226,18 @@ TEST( wrap, matlab_code_namespaces ) {
EXPECT(files_equal(exp_path + "new_ns2ClassC_.m" , act_path + "new_ns2ClassC_.m" ));
EXPECT(files_equal(exp_path + "new_ns2ns3ClassB_.cpp" , act_path + "new_ns2ns3ClassB_.cpp" ));
EXPECT(files_equal(exp_path + "new_ns2ns3ClassB_.m" , act_path + "new_ns2ns3ClassB_.m" ));
EXPECT(files_equal(exp_path + "delete_ClassD.cpp" , act_path + "delete_ClassD.cpp" ));
EXPECT(files_equal(exp_path + "delete_ClassD.m" , act_path + "delete_ClassD.m" ));
EXPECT(files_equal(exp_path + "delete_ns1ClassA.cpp" , act_path + "delete_ns1ClassA.cpp" ));
EXPECT(files_equal(exp_path + "delete_ns1ClassA.m" , act_path + "delete_ns1ClassA.m" ));
EXPECT(files_equal(exp_path + "delete_ns1ClassB.cpp" , act_path + "delete_ns1ClassB.cpp" ));
EXPECT(files_equal(exp_path + "delete_ns1ClassB.m" , act_path + "delete_ns1ClassB.m" ));
EXPECT(files_equal(exp_path + "delete_ns2ClassA.cpp" , act_path + "delete_ns2ClassA.cpp" ));
EXPECT(files_equal(exp_path + "delete_ns2ClassA.m" , act_path + "delete_ns2ClassA.m" ));
EXPECT(files_equal(exp_path + "delete_ns2ClassC.cpp" , act_path + "delete_ns2ClassC.cpp" ));
EXPECT(files_equal(exp_path + "delete_ns2ClassC.m" , act_path + "delete_ns2ClassC.m" ));
EXPECT(files_equal(exp_path + "delete_ns2ns3ClassB.cpp" , act_path + "delete_ns2ns3ClassB.cpp" ));
EXPECT(files_equal(exp_path + "delete_ns2ns3ClassB.m" , act_path + "delete_ns2ns3ClassB.m" ));
EXPECT(files_equal(exp_path + "ns2ClassA_afunction.cpp" , act_path + "ns2ClassA_afunction.cpp" ));
EXPECT(files_equal(exp_path + "ns2ClassA_afunction.m" , act_path + "ns2ClassA_afunction.m" ));
@ -267,6 +279,8 @@ TEST( wrap, matlab_code ) {
EXPECT(files_equal(path + "/tests/expected/@Point3/norm.cpp" , "actual/@Point3/norm.cpp" ));
EXPECT(files_equal(path + "/tests/expected/new_Test_.cpp" , "actual/new_Test_.cpp" ));
EXPECT(files_equal(path + "/tests/expected/delete_Test.cpp" , "actual/delete_Test.cpp" ));
EXPECT(files_equal(path + "/tests/expected/delete_Test.m" , "actual/delete_Test.m" ));
EXPECT(files_equal(path + "/tests/expected/@Test/Test.m" , "actual/@Test/Test.m" ));
EXPECT(files_equal(path + "/tests/expected/@Test/return_string.cpp" , "actual/@Test/return_string.cpp" ));
EXPECT(files_equal(path + "/tests/expected/@Test/return_pair.cpp" , "actual/@Test/return_pair.cpp" ));