Merge branch 'manifold-2' into feature/ImuFactorPush2
commit
e51b1cb856
|
|
@ -64,6 +64,7 @@ option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TB
|
|||
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" ON)
|
||||
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" ON)
|
||||
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
|
||||
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module" ON)
|
||||
|
||||
# Options relating to MATLAB wrapper
|
||||
# TODO: Check for matlab mex binary before handling building of binaries
|
||||
|
|
@ -344,6 +345,20 @@ if (GTSAM_INSTALL_MATLAB_TOOLBOX)
|
|||
add_subdirectory(matlab)
|
||||
endif()
|
||||
|
||||
# Python wrap
|
||||
if (GTSAM_BUILD_PYTHON)
|
||||
include(GtsamPythonWrap)
|
||||
|
||||
# NOTE: The automatic generation of python wrapper from the gtsampy.h interface is
|
||||
# not working yet, so we're using a handwritten wrapper files on python/handwritten.
|
||||
# Once the python wrapping from the interface file is working, you can _swap_ the
|
||||
# comments on the next lines
|
||||
|
||||
# wrap_and_install_python(gtsampy.h "${GTSAM_ADDITIONAL_LIBRARIES}" "")
|
||||
add_subdirectory(python)
|
||||
|
||||
endif()
|
||||
|
||||
# Build gtsam_unstable
|
||||
if (GTSAM_BUILD_UNSTABLE)
|
||||
add_subdirectory(gtsam_unstable)
|
||||
|
|
@ -460,6 +475,17 @@ print_config_flag(${GTSAM_POSE3_EXPMAP} "Pose3 retract is full Ex
|
|||
message(STATUS "MATLAB toolbox flags ")
|
||||
print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
|
||||
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")
|
||||
|
||||
message(STATUS "Python module flags ")
|
||||
|
||||
if(GTSAM_PYTHON_WARNINGS)
|
||||
message(STATUS " Build python module : No - dependencies missing")
|
||||
else()
|
||||
print_config_flag(${GTSAM_BUILD_PYTHON} "Build python module ")
|
||||
endif()
|
||||
if(GTSAM_BUILD_PYTHON)
|
||||
message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
|
||||
endif()
|
||||
message(STATUS "===============================================================")
|
||||
|
||||
# Print warnings at the end
|
||||
|
|
@ -472,6 +498,9 @@ endif()
|
|||
if(GTSAM_WITH_EIGEN_MKL_OPENMP AND NOT OPENMP_FOUND AND MKL_FOUND)
|
||||
message(WARNING "Your compiler does not support OpenMP - this is ok, but performance may be improved with OpenMP. Set GTSAM_WITH_EIGEN_MKL_OPENMP to 'Off' to avoid this warning.")
|
||||
endif()
|
||||
if(GTSAM_BUILD_PYTHON AND GTSAM_PYTHON_WARNINGS)
|
||||
message(WARNING "${GTSAM_PYTHON_WARNINGS}")
|
||||
endif()
|
||||
|
||||
# Include CPack *after* all flags
|
||||
include(CPack)
|
||||
|
|
|
|||
4
THANKS
4
THANKS
|
|
@ -38,6 +38,10 @@ at Uni Zurich:
|
|||
|
||||
* Christian Forster
|
||||
|
||||
at LAAS-CNRS
|
||||
|
||||
* Ellon Paiva
|
||||
|
||||
Many thanks for your hard work!!!!
|
||||
|
||||
Frank Dellaert
|
||||
|
|
|
|||
|
|
@ -0,0 +1,102 @@
|
|||
# - Find the NumPy libraries
|
||||
# This module finds if NumPy is installed, and sets the following variables
|
||||
# indicating where it is.
|
||||
#
|
||||
# TODO: Update to provide the libraries and paths for linking npymath lib.
|
||||
#
|
||||
# NUMPY_FOUND - was NumPy found
|
||||
# NUMPY_VERSION - the version of NumPy found as a string
|
||||
# NUMPY_VERSION_MAJOR - the major version number of NumPy
|
||||
# NUMPY_VERSION_MINOR - the minor version number of NumPy
|
||||
# NUMPY_VERSION_PATCH - the patch version number of NumPy
|
||||
# NUMPY_VERSION_DECIMAL - e.g. version 1.6.1 is 10601
|
||||
# NUMPY_INCLUDE_DIRS - path to the NumPy include files
|
||||
|
||||
#============================================================================
|
||||
# Copyright 2012 Continuum Analytics, Inc.
|
||||
#
|
||||
# MIT License
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files
|
||||
# (the "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
# persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included
|
||||
# in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
#
|
||||
#============================================================================
|
||||
|
||||
# Finding NumPy involves calling the Python interpreter
|
||||
if(NumPy_FIND_REQUIRED)
|
||||
find_package(PythonInterp REQUIRED)
|
||||
else()
|
||||
find_package(PythonInterp)
|
||||
endif()
|
||||
|
||||
if(NOT PYTHONINTERP_FOUND)
|
||||
set(NUMPY_FOUND FALSE)
|
||||
return()
|
||||
endif()
|
||||
|
||||
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c"
|
||||
"import numpy as n; print(n.__version__); print(n.get_include());"
|
||||
RESULT_VARIABLE _NUMPY_SEARCH_SUCCESS
|
||||
OUTPUT_VARIABLE _NUMPY_VALUES_OUTPUT
|
||||
ERROR_VARIABLE _NUMPY_ERROR_VALUE
|
||||
OUTPUT_STRIP_TRAILING_WHITESPACE)
|
||||
|
||||
if(NOT _NUMPY_SEARCH_SUCCESS MATCHES 0)
|
||||
if(NumPy_FIND_REQUIRED)
|
||||
message(FATAL_ERROR
|
||||
"NumPy import failure:\n${_NUMPY_ERROR_VALUE}")
|
||||
endif()
|
||||
set(NUMPY_FOUND FALSE)
|
||||
return()
|
||||
endif()
|
||||
|
||||
# Convert the process output into a list
|
||||
string(REGEX REPLACE ";" "\\\\;" _NUMPY_VALUES ${_NUMPY_VALUES_OUTPUT})
|
||||
string(REGEX REPLACE "\n" ";" _NUMPY_VALUES ${_NUMPY_VALUES})
|
||||
# Just in case there is unexpected output from the Python command.
|
||||
list(GET _NUMPY_VALUES -2 NUMPY_VERSION)
|
||||
list(GET _NUMPY_VALUES -1 NUMPY_INCLUDE_DIRS)
|
||||
|
||||
string(REGEX MATCH "^[0-9]+\\.[0-9]+\\.[0-9]+" _VER_CHECK "${NUMPY_VERSION}")
|
||||
if("${_VER_CHECK}" STREQUAL "")
|
||||
# The output from Python was unexpected. Raise an error always
|
||||
# here, because we found NumPy, but it appears to be corrupted somehow.
|
||||
message(FATAL_ERROR
|
||||
"Requested version and include path from NumPy, got instead:\n${_NUMPY_VALUES_OUTPUT}\n")
|
||||
return()
|
||||
endif()
|
||||
|
||||
# Make sure all directory separators are '/'
|
||||
string(REGEX REPLACE "\\\\" "/" NUMPY_INCLUDE_DIRS ${NUMPY_INCLUDE_DIRS})
|
||||
|
||||
# Get the major and minor version numbers
|
||||
string(REGEX REPLACE "\\." ";" _NUMPY_VERSION_LIST ${NUMPY_VERSION})
|
||||
list(GET _NUMPY_VERSION_LIST 0 NUMPY_VERSION_MAJOR)
|
||||
list(GET _NUMPY_VERSION_LIST 1 NUMPY_VERSION_MINOR)
|
||||
list(GET _NUMPY_VERSION_LIST 2 NUMPY_VERSION_PATCH)
|
||||
string(REGEX MATCH "[0-9]*" NUMPY_VERSION_PATCH ${NUMPY_VERSION_PATCH})
|
||||
math(EXPR NUMPY_VERSION_DECIMAL
|
||||
"(${NUMPY_VERSION_MAJOR} * 10000) + (${NUMPY_VERSION_MINOR} * 100) + ${NUMPY_VERSION_PATCH}")
|
||||
|
||||
find_package_message(NUMPY
|
||||
"Found NumPy: version \"${NUMPY_VERSION}\" ${NUMPY_INCLUDE_DIRS}"
|
||||
"${NUMPY_INCLUDE_DIRS}${NUMPY_VERSION}")
|
||||
|
||||
set(NUMPY_FOUND TRUE)
|
||||
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
#Setup cache options
|
||||
option(GTSAM_BUILD_PYTHON "Build Python wrapper statically (increases build time)" OFF)
|
||||
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to chose the default version")
|
||||
set(GTSAM_BUILD_PYTHON_FLAGS "" CACHE STRING "Extra flags for running Matlab PYTHON compilation")
|
||||
set(GTSAM_PYTHON_INSTALL_PATH "" CACHE PATH "Python toolbox destination, blank defaults to CMAKE_INSTALL_PREFIX/borg/python")
|
||||
if(NOT GTSAM_PYTHON_INSTALL_PATH)
|
||||
|
|
@ -8,13 +8,13 @@ endif()
|
|||
|
||||
#Author: Paul Furgale Modified by Andrew Melim
|
||||
function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
|
||||
# Boost
|
||||
find_package(Boost COMPONENTS python filesystem system REQUIRED)
|
||||
include_directories(${Boost_INCLUDE_DIRS})
|
||||
# # Boost
|
||||
# find_package(Boost COMPONENTS python filesystem system REQUIRED)
|
||||
# include_directories(${Boost_INCLUDE_DIRS})
|
||||
|
||||
# Find Python
|
||||
FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
|
||||
INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
|
||||
# # Find Python
|
||||
# FIND_PACKAGE(PythonLibs 2.7 REQUIRED)
|
||||
# INCLUDE_DIRECTORIES(${PYTHON_INCLUDE_DIRS})
|
||||
|
||||
IF(APPLE)
|
||||
# The apple framework headers don't include the numpy headers for some reason.
|
||||
|
|
@ -36,23 +36,46 @@ function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
|
|||
ENDIF()
|
||||
ENDIF(APPLE)
|
||||
|
||||
if(MSVC)
|
||||
add_library(${moduleName}_python MODULE ${ARGN})
|
||||
set_target_properties(${moduleName}_python PROPERTIES
|
||||
OUTPUT_NAME ${moduleName}_python
|
||||
CLEAN_DIRECT_OUTPUT 1
|
||||
VERSION 1
|
||||
SOVERSION 0
|
||||
SUFFIX ".pyd")
|
||||
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
|
||||
|
||||
# Create a static library version
|
||||
add_library(${TARGET_NAME} SHARED ${ARGN})
|
||||
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
|
||||
message(${PYLIB_OUTPUT_FILE})
|
||||
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
|
||||
set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.pyd)
|
||||
|
||||
target_link_libraries(${TARGET_NAME} ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} gtsam-shared)
|
||||
set_target_properties(${TARGET_NAME} PROPERTIES
|
||||
OUTPUT_NAME ${TARGET_NAME}
|
||||
CLEAN_DIRECT_OUTPUT 1
|
||||
VERSION 1
|
||||
SOVERSION 0)
|
||||
ELSE()
|
||||
# Create a shared library
|
||||
add_library(${moduleName}_python SHARED ${generated_cpp_file})
|
||||
|
||||
set_target_properties(${moduleName}_python PROPERTIES
|
||||
OUTPUT_NAME ${moduleName}_python
|
||||
CLEAN_DIRECT_OUTPUT 1)
|
||||
target_link_libraries(${moduleName}_python ${Boost_PYTHON_LIBRARY} ${PYTHON_LIBRARY} ${gtsamLib}) #temp
|
||||
# On OSX and Linux, the python library must end in the extension .so. Build this
|
||||
# filename here.
|
||||
get_property(PYLIB_OUTPUT_FILE TARGET ${moduleName}_python PROPERTY LOCATION)
|
||||
set(PYLIB_OUTPUT_FILE $<TARGET_FILE:${moduleName}_python>)
|
||||
message(${PYLIB_OUTPUT_FILE})
|
||||
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
|
||||
set(PYLIB_SO_NAME lib${moduleName}_python.so)
|
||||
ENDIF(MSVC)
|
||||
|
||||
# On OSX and Linux, the python library must end in the extension .so. Build this
|
||||
# filename here.
|
||||
get_property(PYLIB_OUTPUT_FILE TARGET ${TARGET_NAME} PROPERTY LOCATION)
|
||||
get_filename_component(PYLIB_OUTPUT_NAME ${PYLIB_OUTPUT_FILE} NAME_WE)
|
||||
set(PYLIB_SO_NAME ${PYLIB_OUTPUT_NAME}.so)
|
||||
# Installs the library in the gtsam folder, which is used by setup.py to create the gtsam package
|
||||
set(PYTHON_MODULE_DIRECTORY ${CMAKE_SOURCE_DIR}/python/gtsam)
|
||||
# Cause the library to be output in the correct directory.
|
||||
add_custom_command(TARGET ${moduleName}_python
|
||||
POST_BUILD
|
||||
COMMAND cp -v ${PYLIB_OUTPUT_FILE} ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME}
|
||||
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
|
||||
COMMENT "Copying library files to python directory" )
|
||||
|
||||
# Cause the library to be output in the correct directory.
|
||||
add_custom_command(TARGET ${TARGET_NAME}
|
||||
|
|
@ -64,4 +87,16 @@ function(wrap_python TARGET_NAME PYTHON_MODULE_DIRECTORY)
|
|||
get_directory_property(AMCF ADDITIONAL_MAKE_CLEAN_FILES)
|
||||
list(APPEND AMCF ${PYTHON_MODULE_DIRECTORY}/${PYLIB_SO_NAME})
|
||||
set_directory_properties(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${AMCF}")
|
||||
endfunction(wrap_python)
|
||||
endfunction(wrap_python)
|
||||
|
||||
# Macro to get list of subdirectories
|
||||
macro(SUBDIRLIST result curdir)
|
||||
file(GLOB children RELATIVE ${curdir} ${curdir}/*)
|
||||
set(dirlist "")
|
||||
foreach(child ${children})
|
||||
if(IS_DIRECTORY ${curdir}/${child})
|
||||
list(APPEND dirlist ${child})
|
||||
endif()
|
||||
endforeach()
|
||||
set(${result} ${dirlist})
|
||||
endmacro()
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
numpy_eigen
|
||||
===========
|
||||
|
||||
A boost python converter that handles the copy of matrices from the Eigen linear algebra library in C++ to numpy in Python.
|
||||
|
||||
This is a minimal version based on the original from Paul Furgale (https://github.com/ethz-asl/Schweizer-Messer/tree/master/numpy_eigen)
|
||||
|
|
@ -0,0 +1,334 @@
|
|||
/**
|
||||
* @file NumpyEigenConverter.hpp
|
||||
* @author Paul Furgale <paul.furgale@utoronto.ca>
|
||||
* @date Fri Feb 4 11:17:25 2011
|
||||
*
|
||||
* @brief Classes to support conversion from numpy arrays in Python
|
||||
* to Eigen3 matrices in c++
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef NUMPY_EIGEN_CONVERTER_HPP
|
||||
#define NUMPY_EIGEN_CONVERTER_HPP
|
||||
|
||||
#include <numpy_eigen/boost_python_headers.hpp>
|
||||
//#include <iostream>
|
||||
|
||||
#include "numpy/numpyconfig.h"
|
||||
#ifdef NPY_1_7_API_VERSION
|
||||
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
||||
#define NPE_PY_ARRAY_OBJECT PyArrayObject
|
||||
#else
|
||||
//TODO Remove this as soon as support for Numpy version before 1.7 is dropped
|
||||
#define NPE_PY_ARRAY_OBJECT PyObject
|
||||
#endif
|
||||
|
||||
#define PY_ARRAY_UNIQUE_SYMBOL NP_Eigen_AS
|
||||
#include <numpy/arrayobject.h>
|
||||
|
||||
#include "type_traits.hpp"
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include "copy_routines.hpp"
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* @class NumpyEigenConverter
|
||||
* @tparam the Eigen3 matrix type this class is specialized for
|
||||
*
|
||||
* adapted from http://misspent.wordpress.com/2009/09/27/how-to-write-boost-python-converters/
|
||||
* General help available http://docs.scipy.org/doc/numpy/reference/c-api.array.html
|
||||
*
|
||||
* To use:
|
||||
*
|
||||
* #include <NumpyEigenConverter.hpp>
|
||||
*
|
||||
*
|
||||
* BOOST_PYTHON_MODULE(libmy_module_python)
|
||||
* {
|
||||
* // The converters will cause a segfault unless import_array() is called before the first one
|
||||
* import_array();
|
||||
* NumpyEigenConverter<Eigen::Matrix< double, 1, 1 > >::register_converter();
|
||||
* NumpyEigenConverter<Eigen::Matrix< double, 2, 1 > >::register_converter();
|
||||
* }
|
||||
*
|
||||
*/
|
||||
template<typename EIGEN_MATRIX_T>
|
||||
struct NumpyEigenConverter
|
||||
{
|
||||
|
||||
typedef EIGEN_MATRIX_T matrix_t;
|
||||
typedef typename matrix_t::Scalar scalar_t;
|
||||
|
||||
enum {
|
||||
RowsAtCompileTime = matrix_t::RowsAtCompileTime,
|
||||
ColsAtCompileTime = matrix_t::ColsAtCompileTime,
|
||||
MaxRowsAtCompileTime = matrix_t::MaxRowsAtCompileTime,
|
||||
MaxColsAtCompileTime = matrix_t::MaxColsAtCompileTime,
|
||||
NpyType = TypeToNumPy<scalar_t>::NpyType,
|
||||
//Flags = ei_compute_matrix_flags<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>::ret,
|
||||
//CoeffReadCost = NumTraits<Scalar>::ReadCost,
|
||||
Options = matrix_t::Options
|
||||
//InnerStrideAtCompileTime = 1,
|
||||
//OuterStrideAtCompileTime = (Options&RowMajor) ? ColsAtCompileTime : RowsAtCompileTime
|
||||
};
|
||||
|
||||
static std::string castSizeOption(int option)
|
||||
{
|
||||
if(option == Eigen::Dynamic)
|
||||
return "Dynamic";
|
||||
else
|
||||
return boost::lexical_cast<std::string>(option);
|
||||
}
|
||||
|
||||
static std::string toString()
|
||||
{
|
||||
return std::string() + "Eigen::Matrix<" + TypeToNumPy<scalar_t>::typeString() + ", " +
|
||||
castSizeOption(RowsAtCompileTime) + ", " +
|
||||
castSizeOption(ColsAtCompileTime) + ", " +
|
||||
boost::lexical_cast<std::string>((int)Options) + ", " +
|
||||
castSizeOption(MaxRowsAtCompileTime) + ", " +
|
||||
castSizeOption(MaxColsAtCompileTime) + ">";
|
||||
}
|
||||
|
||||
// The "Convert from C to Python" API
|
||||
static PyObject * convert(const matrix_t & M)
|
||||
{
|
||||
PyObject * P = NULL;
|
||||
if(RowsAtCompileTime == 1 || ColsAtCompileTime == 1)
|
||||
{
|
||||
// Create a 1D array
|
||||
npy_intp dimensions[1];
|
||||
dimensions[0] = M.size();
|
||||
P = PyArray_SimpleNew(1, dimensions, TypeToNumPy<scalar_t>::NpyType);
|
||||
numpyTypeDemuxer< CopyEigenToNumpyVector<const matrix_t> >(&M, reinterpret_cast<NPE_PY_ARRAY_OBJECT*>(P));
|
||||
}
|
||||
else
|
||||
{
|
||||
// create a 2D array.
|
||||
npy_intp dimensions[2];
|
||||
dimensions[0] = M.rows();
|
||||
dimensions[1] = M.cols();
|
||||
P = PyArray_SimpleNew(2, dimensions, TypeToNumPy<scalar_t>::NpyType);
|
||||
numpyTypeDemuxer< CopyEigenToNumpyMatrix<const matrix_t> >(&M, reinterpret_cast<NPE_PY_ARRAY_OBJECT*>(P));
|
||||
}
|
||||
|
||||
// incrementing the reference seems to cause a memory leak.
|
||||
// boost::python::incref(P);
|
||||
// This agrees with the sample code found here:
|
||||
// http://mail.python.org/pipermail/cplusplus-sig/2008-October/013825.html
|
||||
return P;
|
||||
}
|
||||
|
||||
static bool isDimensionValid(int requestedSize, int sizeAtCompileTime, int maxSizeAtCompileTime)
|
||||
{
|
||||
bool valid = true;
|
||||
if(sizeAtCompileTime == Eigen::Dynamic)
|
||||
{
|
||||
// Check for dynamic fixed size
|
||||
// http://eigen.tuxfamily.org/dox-devel/TutorialMatrixClass.html#TutorialMatrixOptTemplParams
|
||||
if(!(maxSizeAtCompileTime == Eigen::Dynamic || requestedSize <= maxSizeAtCompileTime))
|
||||
{
|
||||
valid = false;
|
||||
}
|
||||
}
|
||||
else if(sizeAtCompileTime != requestedSize)
|
||||
{
|
||||
valid = false;
|
||||
}
|
||||
return valid;
|
||||
}
|
||||
|
||||
static void checkMatrixSizes(NPE_PY_ARRAY_OBJECT * obj_ptr)
|
||||
{
|
||||
int rows = PyArray_DIM(obj_ptr, 0);
|
||||
int cols = PyArray_DIM(obj_ptr, 1);
|
||||
|
||||
bool rowsValid = isDimensionValid(rows, RowsAtCompileTime, MaxRowsAtCompileTime);
|
||||
bool colsValid = isDimensionValid(cols, ColsAtCompileTime, MaxColsAtCompileTime);
|
||||
if(!rowsValid || !colsValid)
|
||||
{
|
||||
THROW_TYPE_ERROR("Can not convert " << npyArrayTypeString(obj_ptr) << " to " << toString()
|
||||
<< ". Mismatched sizes.");
|
||||
}
|
||||
}
|
||||
|
||||
static void checkRowVectorSizes(NPE_PY_ARRAY_OBJECT * obj_ptr, int cols)
|
||||
{
|
||||
if(!isDimensionValid(cols, ColsAtCompileTime, MaxColsAtCompileTime))
|
||||
{
|
||||
THROW_TYPE_ERROR("Can not convert " << npyArrayTypeString(obj_ptr) << " to " << toString()
|
||||
<< ". Mismatched sizes.");
|
||||
}
|
||||
}
|
||||
|
||||
static void checkColumnVectorSizes(NPE_PY_ARRAY_OBJECT * obj_ptr, int rows)
|
||||
{
|
||||
// Check if the type can accomidate one column.
|
||||
if(ColsAtCompileTime == Eigen::Dynamic || ColsAtCompileTime == 1)
|
||||
{
|
||||
if(!isDimensionValid(rows, RowsAtCompileTime, MaxRowsAtCompileTime))
|
||||
{
|
||||
THROW_TYPE_ERROR("Can not convert " << npyArrayTypeString(obj_ptr) << " to " << toString()
|
||||
<< ". Mismatched sizes.");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
THROW_TYPE_ERROR("Can not convert " << npyArrayTypeString(obj_ptr) << " to " << toString()
|
||||
<< ". Mismatched sizes.");
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static void checkVectorSizes(NPE_PY_ARRAY_OBJECT * obj_ptr)
|
||||
{
|
||||
int size = PyArray_DIM(obj_ptr, 0);
|
||||
|
||||
// If the number of rows is fixed at 1, assume that is the sense of the vector.
|
||||
// Otherwise, assume it is a column.
|
||||
if(RowsAtCompileTime == 1)
|
||||
{
|
||||
checkRowVectorSizes(obj_ptr, size);
|
||||
}
|
||||
else
|
||||
{
|
||||
checkColumnVectorSizes(obj_ptr, size);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void* convertible(PyObject *obj_ptr)
|
||||
{
|
||||
// Check for a null pointer.
|
||||
if(!obj_ptr)
|
||||
{
|
||||
//THROW_TYPE_ERROR("PyObject pointer was null");
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Make sure this is a numpy array.
|
||||
if (!PyArray_Check(obj_ptr))
|
||||
{
|
||||
//THROW_TYPE_ERROR("Conversion is only defined for numpy array and matrix types");
|
||||
return 0;
|
||||
}
|
||||
|
||||
NPE_PY_ARRAY_OBJECT * array_ptr = reinterpret_cast<NPE_PY_ARRAY_OBJECT*>(obj_ptr);
|
||||
|
||||
// Check the type of the array.
|
||||
int npyType = getNpyType(array_ptr);
|
||||
|
||||
if(!TypeToNumPy<scalar_t>::canConvert(npyType))
|
||||
{
|
||||
//THROW_TYPE_ERROR("Can not convert " << npyArrayTypeString(obj_ptr) << " to " << toString()
|
||||
// << ". Mismatched types.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Check the array dimensions.
|
||||
int nd = PyArray_NDIM(array_ptr);
|
||||
|
||||
if(nd != 1 && nd != 2)
|
||||
{
|
||||
THROW_TYPE_ERROR("Conversion is only valid for arrays with 1 or 2 dimensions. Argument has " << nd << " dimensions");
|
||||
}
|
||||
|
||||
if(nd == 1)
|
||||
{
|
||||
checkVectorSizes(array_ptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Two-dimensional matrix type.
|
||||
checkMatrixSizes(array_ptr);
|
||||
}
|
||||
|
||||
|
||||
return obj_ptr;
|
||||
}
|
||||
|
||||
|
||||
static void construct(PyObject *obj_ptr, boost::python::converter::rvalue_from_python_stage1_data *data)
|
||||
{
|
||||
boost::python::converter::rvalue_from_python_storage<matrix_t> * matData = reinterpret_cast<boost::python::converter::rvalue_from_python_storage<matrix_t> * >(data);
|
||||
void* storage = matData->storage.bytes;
|
||||
|
||||
// Make sure storage is 16byte aligned. With help from code from Memory.h
|
||||
void * aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(storage) & ~(size_t(15))) + 16);
|
||||
|
||||
matrix_t * Mp = new (aligned) matrix_t();
|
||||
// Stash the memory chunk pointer for later use by boost.python
|
||||
// This signals boost::python that the new value must be deleted eventually
|
||||
data->convertible = storage;
|
||||
|
||||
|
||||
// std::cout << "Creating aligned pointer " << aligned << " from storage " << storage << std::endl;
|
||||
// std::cout << "matrix size: " << sizeof(matrix_t) << std::endl;
|
||||
// std::cout << "referent size: " << boost::python::detail::referent_size< matrix_t & >::value << std::endl;
|
||||
// std::cout << "sizeof(storage): " << sizeof(matData->storage) << std::endl;
|
||||
// std::cout << "sizeof(bytes): " << sizeof(matData->storage.bytes) << std::endl;
|
||||
|
||||
|
||||
|
||||
matrix_t & M = *Mp;
|
||||
|
||||
if (!PyArray_Check(obj_ptr))
|
||||
{
|
||||
THROW_TYPE_ERROR("construct is only defined for numpy array and matrix types");
|
||||
}
|
||||
|
||||
NPE_PY_ARRAY_OBJECT * array_ptr = reinterpret_cast<NPE_PY_ARRAY_OBJECT*>(obj_ptr);
|
||||
|
||||
int nd = PyArray_NDIM(array_ptr);
|
||||
if(nd == 1)
|
||||
{
|
||||
int size = PyArray_DIM(array_ptr, 0);
|
||||
// This is a vector type
|
||||
if(RowsAtCompileTime == 1)
|
||||
{
|
||||
// Row Vector
|
||||
M.resize(1,size);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Column Vector
|
||||
M.resize(size,1);
|
||||
}
|
||||
numpyTypeDemuxer< CopyNumpyToEigenVector<matrix_t> >(&M, array_ptr);
|
||||
}
|
||||
else
|
||||
{
|
||||
int rows = PyArray_DIM(array_ptr, 0);
|
||||
int cols = PyArray_DIM(array_ptr, 1);
|
||||
|
||||
M.resize(rows,cols);
|
||||
numpyTypeDemuxer< CopyNumpyToEigenMatrix<matrix_t> >(&M, array_ptr);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
// The registration function.
|
||||
static void register_converter()
|
||||
{
|
||||
boost::python::to_python_converter<matrix_t,NumpyEigenConverter>();
|
||||
boost::python::converter::registry::push_back(
|
||||
&NumpyEigenConverter::convertible,
|
||||
&NumpyEigenConverter::construct,
|
||||
boost::python::type_id<matrix_t>());
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
#endif /* NUMPY_EIGEN_CONVERTER_HPP */
|
||||
250
gtsam/3rdparty/numpy_eigen/include/numpy_eigen/boost_python_headers.hpp
vendored
Executable file
250
gtsam/3rdparty/numpy_eigen/include/numpy_eigen/boost_python_headers.hpp
vendored
Executable file
|
|
@ -0,0 +1,250 @@
|
|||
/**
|
||||
* @file boost_python_headers.hpp
|
||||
* @author Paul Furgale <paul.furgale@gmail.com>
|
||||
* @date Mon Dec 12 10:36:03 2011
|
||||
*
|
||||
* @brief A header that specializes boost-python to work with fixed-sized Eigen types.
|
||||
*
|
||||
* The original version of this library did not include these specializations and this caused
|
||||
* assert failures when running on Ubuntu 10.04 32-bit. More information about fixed-size
|
||||
* vectorizable types in Eigen is available here:
|
||||
* http://eigen.tuxfamily.org/dox-devel/TopicFixedSizeVectorizable.html
|
||||
*
|
||||
* This code has been tested on Ubunutu 10.04 64 and 32 bit, OSX Snow Leopard and OSX Lion.
|
||||
*
|
||||
* This code is derived from boost/python/converter/arg_from_python.hpp
|
||||
* Copyright David Abrahams 2002.
|
||||
* Distributed under the Boost Software License, Version 1.0. (See http://www.boost.org/LICENSE_1_0.txt)
|
||||
*
|
||||
*/
|
||||
#ifndef NUMPY_EIGEN_CONVERTERS_HPP
|
||||
#define NUMPY_EIGEN_CONVERTERS_HPP
|
||||
|
||||
#include <Eigen/Core>
|
||||
#include <boost/python.hpp>
|
||||
#include <boost/python/detail/referent_storage.hpp>
|
||||
#include <boost/python/converter/arg_from_python.hpp>
|
||||
#include <boost/python/converter/rvalue_from_python_data.hpp>
|
||||
#include <boost/python/tuple.hpp>
|
||||
|
||||
|
||||
namespace boost { namespace python { namespace detail {
|
||||
template<typename T>
|
||||
struct referent_size;
|
||||
|
||||
// This bit of code makes sure we have 16 extra bytes to do the pointer alignment for fixed-sized Eigen types
|
||||
template<typename T, int A, int B, int C, int D, int E>
|
||||
struct referent_size< Eigen::Matrix<T,A,B,C,D,E>& >
|
||||
{
|
||||
// Add 16 bytes so we can get alignment
|
||||
BOOST_STATIC_CONSTANT( std::size_t, value = sizeof(Eigen::Matrix<T,A,B,C,D,E>) + 16);
|
||||
};
|
||||
|
||||
// This bit of code makes sure we have 16 extra bytes to do the pointer alignment for fixed-sized Eigen types
|
||||
template<typename T, int A, int B, int C, int D, int E>
|
||||
struct referent_size< Eigen::Matrix<T,A,B,C,D,E> const & >
|
||||
{
|
||||
// Add 16 bytes so we can get alignment
|
||||
BOOST_STATIC_CONSTANT( std::size_t, value = sizeof(Eigen::Matrix<T,A,B,C,D,E>) + 16);
|
||||
};
|
||||
|
||||
// This bit of code makes sure we have 16 extra bytes to do the pointer alignment for fixed-sized Eigen types
|
||||
template<typename T, int A, int B, int C, int D, int E>
|
||||
struct referent_size< Eigen::Matrix<T,A,B,C,D,E> >
|
||||
{
|
||||
// Add 16 bytes so we can get alignment
|
||||
BOOST_STATIC_CONSTANT( std::size_t, value = sizeof(Eigen::Matrix<T,A,B,C,D,E>) + 16);
|
||||
};
|
||||
|
||||
|
||||
}}}
|
||||
|
||||
namespace boost { namespace python { namespace converter {
|
||||
|
||||
|
||||
template<typename S, int A, int B, int C, int D, int E>
|
||||
struct rvalue_from_python_data< Eigen::Matrix<S,A,B,C,D,E> const &> : rvalue_from_python_storage< Eigen::Matrix<S,A,B,C,D,E> const & >
|
||||
{
|
||||
typedef typename Eigen::Matrix<S,A,B,C,D,E> T;
|
||||
# if (!defined(__MWERKS__) || __MWERKS__ >= 0x3000) \
|
||||
&& (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 245) \
|
||||
&& (!defined(__DECCXX_VER) || __DECCXX_VER > 60590014) \
|
||||
&& !defined(BOOST_PYTHON_SYNOPSIS) /* Synopsis' OpenCXX has trouble parsing this */
|
||||
// This must always be a POD struct with m_data its first member.
|
||||
BOOST_STATIC_ASSERT(BOOST_PYTHON_OFFSETOF(rvalue_from_python_storage<T>,stage1) == 0);
|
||||
# endif
|
||||
|
||||
// The usual constructor
|
||||
rvalue_from_python_data(rvalue_from_python_stage1_data const & _stage1)
|
||||
{
|
||||
this->stage1 = _stage1;
|
||||
}
|
||||
|
||||
|
||||
// This constructor just sets m_convertible -- used by
|
||||
// implicitly_convertible<> to perform the final step of the
|
||||
// conversion, where the construct() function is already known.
|
||||
rvalue_from_python_data(void* convertible)
|
||||
{
|
||||
this->stage1.convertible = convertible;
|
||||
}
|
||||
|
||||
// Destroys any object constructed in the storage.
|
||||
~rvalue_from_python_data()
|
||||
{
|
||||
// Realign the pointer and destroy
|
||||
if (this->stage1.convertible == this->storage.bytes)
|
||||
{
|
||||
void * storage = reinterpret_cast<void *>(this->storage.bytes);
|
||||
T * aligned = reinterpret_cast<T *>(reinterpret_cast<void *>((reinterpret_cast<size_t>(storage) & ~(size_t(15))) + 16));
|
||||
|
||||
//std::cout << "Destroying " << (void*)aligned << std::endl;
|
||||
aligned->T::~T();
|
||||
}
|
||||
}
|
||||
private:
|
||||
typedef typename add_reference<typename add_cv<T>::type>::type ref_type;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// Used when T is a plain value (non-pointer, non-reference) type or
|
||||
// a (non-volatile) const reference to a plain value type.
|
||||
template<typename S, int A, int B, int C, int D, int E>
|
||||
struct arg_rvalue_from_python< Eigen::Matrix<S,A,B,C,D,E> >
|
||||
{
|
||||
typedef Eigen::Matrix<S,A,B,C,D,E> const & T;
|
||||
typedef typename boost::add_reference<
|
||||
T
|
||||
// We can't add_const here, or it would be impossible to pass
|
||||
// auto_ptr<U> args from Python to C++
|
||||
>::type result_type;
|
||||
|
||||
arg_rvalue_from_python(PyObject * obj) : m_data(converter::rvalue_from_python_stage1(obj, registered<T>::converters))
|
||||
, m_source(obj)
|
||||
{
|
||||
|
||||
}
|
||||
bool convertible() const
|
||||
{
|
||||
return m_data.stage1.convertible != 0;
|
||||
}
|
||||
|
||||
|
||||
# if BOOST_MSVC < 1301 || _MSC_FULL_VER > 13102196
|
||||
typename arg_rvalue_from_python<T>::
|
||||
# endif
|
||||
result_type operator()()
|
||||
{
|
||||
if (m_data.stage1.construct != 0)
|
||||
m_data.stage1.construct(m_source, &m_data.stage1);
|
||||
|
||||
// Here is the magic...
|
||||
// Realign the pointer
|
||||
void * storage = reinterpret_cast<void *>(m_data.storage.bytes);
|
||||
void * aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(storage) & ~(size_t(15))) + 16);
|
||||
|
||||
return python::detail::void_ptr_to_reference(aligned, (result_type(*)())0);
|
||||
}
|
||||
|
||||
private:
|
||||
rvalue_from_python_data<result_type> m_data;
|
||||
PyObject* m_source;
|
||||
};
|
||||
|
||||
|
||||
// Used when T is a plain value (non-pointer, non-reference) type or
|
||||
// a (non-volatile) const reference to a plain value type.
|
||||
template<typename S, int A, int B, int C, int D, int E>
|
||||
struct arg_rvalue_from_python< Eigen::Matrix<S,A,B,C,D,E> const & >
|
||||
{
|
||||
typedef Eigen::Matrix<S,A,B,C,D,E> const & T;
|
||||
typedef typename boost::add_reference<
|
||||
T
|
||||
// We can't add_const here, or it would be impossible to pass
|
||||
// auto_ptr<U> args from Python to C++
|
||||
>::type result_type;
|
||||
|
||||
arg_rvalue_from_python(PyObject * obj) : m_data(converter::rvalue_from_python_stage1(obj, registered<T>::converters))
|
||||
, m_source(obj)
|
||||
{
|
||||
|
||||
}
|
||||
bool convertible() const
|
||||
{
|
||||
return m_data.stage1.convertible != 0;
|
||||
}
|
||||
|
||||
|
||||
# if BOOST_MSVC < 1301 || _MSC_FULL_VER > 13102196
|
||||
typename arg_rvalue_from_python<T>::
|
||||
# endif
|
||||
result_type operator()()
|
||||
{
|
||||
if (m_data.stage1.construct != 0)
|
||||
m_data.stage1.construct(m_source, &m_data.stage1);
|
||||
|
||||
// Here is the magic...
|
||||
// Realign the pointer
|
||||
void * storage = reinterpret_cast<void *>(m_data.storage.bytes);
|
||||
void * aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(storage) & ~(size_t(15))) + 16);
|
||||
|
||||
return python::detail::void_ptr_to_reference(aligned, (result_type(*)())0);
|
||||
}
|
||||
|
||||
private:
|
||||
rvalue_from_python_data<result_type> m_data;
|
||||
PyObject* m_source;
|
||||
};
|
||||
|
||||
// Used when T is a plain value (non-pointer, non-reference) type or
|
||||
// a (non-volatile) const reference to a plain value type.
|
||||
template<typename S, int A, int B, int C, int D, int E>
|
||||
struct arg_rvalue_from_python< Eigen::Matrix<S,A,B,C,D,E> const >
|
||||
{
|
||||
typedef Eigen::Matrix<S,A,B,C,D,E> const & T;
|
||||
typedef typename boost::add_reference<
|
||||
T
|
||||
// We can't add_const here, or it would be impossible to pass
|
||||
// auto_ptr<U> args from Python to C++
|
||||
>::type result_type;
|
||||
|
||||
arg_rvalue_from_python(PyObject * obj) : m_data(converter::rvalue_from_python_stage1(obj, registered<T>::converters))
|
||||
, m_source(obj)
|
||||
{
|
||||
|
||||
}
|
||||
bool convertible() const
|
||||
{
|
||||
return m_data.stage1.convertible != 0;
|
||||
}
|
||||
|
||||
|
||||
# if BOOST_MSVC < 1301 || _MSC_FULL_VER > 13102196
|
||||
typename arg_rvalue_from_python<T>::
|
||||
# endif
|
||||
result_type operator()()
|
||||
{
|
||||
if (m_data.stage1.construct != 0)
|
||||
m_data.stage1.construct(m_source, &m_data.stage1);
|
||||
|
||||
// Here is the magic...
|
||||
// Realign the pointer
|
||||
void * storage = reinterpret_cast<void *>(m_data.storage.bytes);
|
||||
void * aligned = reinterpret_cast<void*>((reinterpret_cast<size_t>(storage) & ~(size_t(15))) + 16);
|
||||
|
||||
return python::detail::void_ptr_to_reference(aligned, (result_type(*)())0);
|
||||
}
|
||||
|
||||
private:
|
||||
rvalue_from_python_data<result_type> m_data;
|
||||
PyObject* m_source;
|
||||
};
|
||||
|
||||
|
||||
}}}
|
||||
|
||||
|
||||
#endif /* NUMPY_EIGEN_CONVERTERS_HPP */
|
||||
|
|
@ -0,0 +1,149 @@
|
|||
#ifndef NUMPY_EIGEN_COPY_ROUTINES_HPP
|
||||
#define NUMPY_EIGEN_COPY_ROUTINES_HPP
|
||||
|
||||
|
||||
template<typename EIGEN_T>
|
||||
struct CopyNumpyToEigenMatrix
|
||||
{
|
||||
typedef EIGEN_T matrix_t;
|
||||
typedef typename matrix_t::Scalar scalar_t;
|
||||
|
||||
template<typename T>
|
||||
void exec(EIGEN_T * M_, NPE_PY_ARRAY_OBJECT * P_)
|
||||
{
|
||||
// Assumes M is already initialized.
|
||||
for(int r = 0; r < M_->rows(); r++)
|
||||
{
|
||||
for(int c = 0; c < M_->cols(); c++)
|
||||
{
|
||||
T * p = static_cast<T*>(PyArray_GETPTR2(P_, r, c));
|
||||
(*M_)(r,c) = static_cast<scalar_t>(*p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
template<typename EIGEN_T>
|
||||
struct CopyEigenToNumpyMatrix
|
||||
{
|
||||
typedef EIGEN_T matrix_t;
|
||||
typedef typename matrix_t::Scalar scalar_t;
|
||||
|
||||
template<typename T>
|
||||
void exec(EIGEN_T * M_, NPE_PY_ARRAY_OBJECT * P_)
|
||||
{
|
||||
// Assumes M is already initialized.
|
||||
for(int r = 0; r < M_->rows(); r++)
|
||||
{
|
||||
for(int c = 0; c < M_->cols(); c++)
|
||||
{
|
||||
T * p = static_cast<T*>(PyArray_GETPTR2(P_, r, c));
|
||||
*p = static_cast<T>((*M_)(r,c));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
template<typename EIGEN_T>
|
||||
struct CopyEigenToNumpyVector
|
||||
{
|
||||
typedef EIGEN_T matrix_t;
|
||||
typedef typename matrix_t::Scalar scalar_t;
|
||||
|
||||
template<typename T>
|
||||
void exec(EIGEN_T * M_, NPE_PY_ARRAY_OBJECT * P_)
|
||||
{
|
||||
// Assumes M is already initialized.
|
||||
for(int i = 0; i < M_->size(); i++)
|
||||
{
|
||||
T * p = static_cast<T*>(PyArray_GETPTR1(P_, i));
|
||||
*p = static_cast<T>((*M_)(i));
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
template<typename EIGEN_T>
|
||||
struct CopyNumpyToEigenVector
|
||||
{
|
||||
typedef EIGEN_T matrix_t;
|
||||
typedef typename matrix_t::Scalar scalar_t;
|
||||
|
||||
template<typename T>
|
||||
void exec(EIGEN_T * M_, NPE_PY_ARRAY_OBJECT * P_)
|
||||
{
|
||||
// Assumes M is already initialized.
|
||||
for(int i = 0; i < M_->size(); i++)
|
||||
{
|
||||
T * p = static_cast<T*>(PyArray_GETPTR1(P_, i));
|
||||
(*M_)(i) = static_cast<scalar_t>(*p);
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
// Crazy syntax in this function was found here:
|
||||
// http://stackoverflow.com/questions/1840253/c-template-member-function-of-template-class-called-from-template-function/1840318#1840318
|
||||
template< typename FUNCTOR_T>
|
||||
inline void numpyTypeDemuxer(typename FUNCTOR_T::matrix_t * M, NPE_PY_ARRAY_OBJECT * P)
|
||||
{
|
||||
FUNCTOR_T f;
|
||||
|
||||
int npyType = getNpyType(P);
|
||||
switch(npyType)
|
||||
{
|
||||
case NPY_BOOL:
|
||||
f.template exec<bool>(M,P);
|
||||
break;
|
||||
case NPY_BYTE:
|
||||
f.template exec<char>(M,P);
|
||||
break;
|
||||
case NPY_UBYTE:
|
||||
f.template exec<unsigned char>(M,P);
|
||||
break;
|
||||
case NPY_SHORT:
|
||||
f.template exec<short>(M,P);
|
||||
break;
|
||||
case NPY_USHORT:
|
||||
f.template exec<unsigned short>(M,P);
|
||||
break;
|
||||
case NPY_INT:
|
||||
f.template exec<int>(M,P);
|
||||
break;
|
||||
case NPY_UINT:
|
||||
f.template exec<unsigned int>(M,P);
|
||||
break;
|
||||
case NPY_LONG:
|
||||
f.template exec<long>(M,P);
|
||||
break;
|
||||
case NPY_ULONG:
|
||||
f.template exec<unsigned long>(M,P);
|
||||
break;
|
||||
case NPY_LONGLONG:
|
||||
f.template exec<long long>(M,P);
|
||||
break;
|
||||
case NPY_ULONGLONG:
|
||||
f.template exec<unsigned long long>(M,P);
|
||||
break;
|
||||
case NPY_FLOAT:
|
||||
f.template exec<float>(M,P);
|
||||
break;
|
||||
case NPY_DOUBLE:
|
||||
f.template exec<double>(M,P);
|
||||
break;
|
||||
case NPY_LONGDOUBLE:
|
||||
f.template exec<long double>(M,P);
|
||||
break;
|
||||
default:
|
||||
THROW_TYPE_ERROR("Unsupported type: " << npyTypeToString(npyType));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif /* NUMPY_EIGEN_COPY_ROUTINES_HPP */
|
||||
|
|
@ -0,0 +1,191 @@
|
|||
#ifndef NUMPY_EIGEN_TYPE_TRAITS_HPP
|
||||
#define NUMPY_EIGEN_TYPE_TRAITS_HPP
|
||||
|
||||
#define THROW_TYPE_ERROR(msg) \
|
||||
{ \
|
||||
std::stringstream type_error_ss; \
|
||||
type_error_ss << msg; \
|
||||
PyErr_SetString(PyExc_TypeError, type_error_ss.str().c_str()); \
|
||||
throw boost::python::error_already_set(); \
|
||||
}
|
||||
|
||||
|
||||
////////////////////////////////////////////////
|
||||
// TypeToNumPy
|
||||
// Defines helper functions based on the Eigen3 matrix type that
|
||||
// decide what conversions can happen Eigen3 --> NumPy
|
||||
// Also, converts a type to a NumPy enum.
|
||||
template<typename Scalar> struct TypeToNumPy;
|
||||
|
||||
template<> struct TypeToNumPy<int>
|
||||
{
|
||||
enum { NpyType = NPY_INT };
|
||||
static const char * npyString() { return "NPY_INT"; }
|
||||
static const char * typeString() { return "int"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_INT || type == NPY_LONG;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct TypeToNumPy<boost::int64_t>
|
||||
{
|
||||
enum { NpyType = NPY_LONG };
|
||||
static const char * npyString() { return "NPY_LONG"; }
|
||||
static const char * typeString() { return "long"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_INT || type == NPY_LONG;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct TypeToNumPy<boost::uint64_t>
|
||||
{
|
||||
enum { NpyType = NPY_UINT64 };
|
||||
static const char * npyString() { return "NPY_UINT64"; }
|
||||
static const char * typeString() { return "uint64"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_UINT8 || type == NPY_USHORT ||
|
||||
type == NPY_UINT16 || type == NPY_UINT32 ||
|
||||
type == NPY_ULONG || type == NPY_ULONGLONG ||
|
||||
type == NPY_UINT64;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct TypeToNumPy<unsigned char>
|
||||
{
|
||||
enum { NpyType = NPY_UBYTE };
|
||||
static const char * npyString() { return "NPY_UBYTE"; }
|
||||
static const char * typeString() { return "unsigned char"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_UBYTE || type == NPY_BYTE || type == NPY_CHAR;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct TypeToNumPy<char>
|
||||
{
|
||||
enum { NpyType = NPY_BYTE };
|
||||
static const char * npyString() { return "NPY_BYTE"; }
|
||||
static const char * typeString() { return "char"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_UBYTE || type == NPY_BYTE || type == NPY_CHAR;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
template<> struct TypeToNumPy<float>
|
||||
{
|
||||
enum { NpyType = NPY_FLOAT };
|
||||
static const char * npyString() { return "NPY_FLOAT"; }
|
||||
static const char * typeString() { return "float"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_INT || type == NPY_FLOAT || type == NPY_LONG;
|
||||
}
|
||||
};
|
||||
|
||||
template<> struct TypeToNumPy<double>
|
||||
{
|
||||
enum { NpyType = NPY_DOUBLE };
|
||||
static const char * npyString() { return "NPY_DOUBLE"; }
|
||||
static const char * typeString() { return "double"; }
|
||||
static bool canConvert(int type)
|
||||
{
|
||||
return type == NPY_INT || type == NPY_FLOAT || type == NPY_DOUBLE || type == NPY_LONG;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
inline int getNpyType(PyObject * obj_ptr){
|
||||
return PyArray_ObjectType(obj_ptr, 0);
|
||||
}
|
||||
|
||||
#ifdef NPY_1_7_API_VERSION
|
||||
inline int getNpyType(PyArrayObject * obj_ptr){
|
||||
PyArray_Descr * descr = PyArray_MinScalarType(obj_ptr);
|
||||
if (descr == NULL){
|
||||
THROW_TYPE_ERROR("Unsupported type: PyArray_MinScalarType returned null!");
|
||||
}
|
||||
return descr->type_num;
|
||||
}
|
||||
#endif
|
||||
|
||||
inline const char * npyTypeToString(int npyType)
|
||||
{
|
||||
switch(npyType)
|
||||
{
|
||||
case NPY_BOOL:
|
||||
return "NPY_BOOL";
|
||||
case NPY_BYTE:
|
||||
return "NPY_BYTE";
|
||||
case NPY_UBYTE:
|
||||
return "NPY_UBYTE";
|
||||
case NPY_SHORT:
|
||||
return "NPY_SHORT";
|
||||
case NPY_USHORT:
|
||||
return "NPY_USHORT";
|
||||
case NPY_INT:
|
||||
return "NPY_INT";
|
||||
case NPY_UINT:
|
||||
return "NPY_UINT";
|
||||
case NPY_LONG:
|
||||
return "NPY_LONG";
|
||||
case NPY_ULONG:
|
||||
return "NPY_ULONG";
|
||||
case NPY_LONGLONG:
|
||||
return "NPY_LONGLONG";
|
||||
case NPY_ULONGLONG:
|
||||
return "NPY_ULONGLONG";
|
||||
case NPY_FLOAT:
|
||||
return "NPY_FLOAT";
|
||||
case NPY_DOUBLE:
|
||||
return "NPY_DOUBLE";
|
||||
case NPY_LONGDOUBLE:
|
||||
return "NPY_LONGDOUBLE";
|
||||
case NPY_CFLOAT:
|
||||
return "NPY_CFLOAT";
|
||||
case NPY_CDOUBLE:
|
||||
return "NPY_CDOUBLE";
|
||||
case NPY_CLONGDOUBLE:
|
||||
return "NPY_CLONGDOUBLE";
|
||||
case NPY_OBJECT:
|
||||
return "NPY_OBJECT";
|
||||
case NPY_STRING:
|
||||
return "NPY_STRING";
|
||||
case NPY_UNICODE:
|
||||
return "NPY_UNICODE";
|
||||
case NPY_VOID:
|
||||
return "NPY_VOID";
|
||||
case NPY_NTYPES:
|
||||
return "NPY_NTYPES";
|
||||
case NPY_NOTYPE:
|
||||
return "NPY_NOTYPE";
|
||||
case NPY_CHAR:
|
||||
return "NPY_CHAR";
|
||||
default:
|
||||
return "Unknown type";
|
||||
}
|
||||
}
|
||||
|
||||
inline std::string npyArrayTypeString(NPE_PY_ARRAY_OBJECT * obj_ptr)
|
||||
{
|
||||
std::stringstream ss;
|
||||
int nd = PyArray_NDIM(obj_ptr);
|
||||
ss << "numpy.array<" << npyTypeToString(getNpyType(obj_ptr)) << ">[";
|
||||
if(nd > 0)
|
||||
{
|
||||
ss << PyArray_DIM(obj_ptr, 0);
|
||||
for(int i = 1; i < nd; i++)
|
||||
{
|
||||
ss << ", " << PyArray_DIM(obj_ptr, i);
|
||||
}
|
||||
}
|
||||
ss << "]";
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
|
||||
#endif /* NUMPY_EIGEN_TYPE_TRAITS_HPP */
|
||||
|
|
@ -310,7 +310,7 @@ namespace gtsam {
|
|||
* Exponential map at identity - create a rotation from canonical coordinates
|
||||
* \f$ [R_x,R_y,R_z] \f$ using Rodrigues' formula
|
||||
*/
|
||||
static Rot3 Expmap(const Vector& v, OptionalJacobian<3,3> H = boost::none) {
|
||||
static Rot3 Expmap(const Vector3& v, OptionalJacobian<3,3> H = boost::none) {
|
||||
if(H) *H = Rot3::ExpmapDerivative(v);
|
||||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
return traits<Quaternion>::Expmap(v);
|
||||
|
|
|
|||
|
|
@ -141,7 +141,20 @@ namespace gtsam {
|
|||
/* ************************************************************************* */
|
||||
pair<Matrix,Vector> GaussianBayesNet::matrix() const
|
||||
{
|
||||
return GaussianFactorGraph(*this).jacobian();
|
||||
GaussianFactorGraph factorGraph(*this);
|
||||
KeySet keys = factorGraph.keys();
|
||||
// add frontal keys in order
|
||||
Ordering ordering;
|
||||
BOOST_FOREACH (const sharedConditional& cg, *this)
|
||||
BOOST_FOREACH (Key key, cg->frontals()) {
|
||||
ordering.push_back(key);
|
||||
keys.erase(key);
|
||||
}
|
||||
// add remaining keys in case Bayes net is incomplete
|
||||
BOOST_FOREACH (Key key, keys)
|
||||
ordering.push_back(key);
|
||||
// return matrix and RHS
|
||||
return factorGraph.jacobian(ordering);
|
||||
}
|
||||
|
||||
///* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -67,52 +67,55 @@ void PreintegratedCombinedMeasurements::resetIntegration() {
|
|||
//------------------------------------------------------------------------------
|
||||
void PreintegratedCombinedMeasurements::integrateMeasurement(
|
||||
const Vector3& measuredAcc, const Vector3& measuredOmega, double deltaT) {
|
||||
|
||||
const Matrix3 dRij = deltaXij_.R(); // expensive when quaternion
|
||||
const Matrix3 dRij = deltaRij().matrix(); // expensive when quaternion
|
||||
|
||||
// Update preintegrated measurements.
|
||||
Matrix3 D_incrR_integratedOmega; // Right jacobian computed at theta_incr
|
||||
Matrix9 F_9x9; // overall Jacobian wrt preintegrated measurements (df/dx)
|
||||
Matrix93 G1,G2;
|
||||
Matrix3 D_incrR_integratedOmega; // Right jacobian computed at theta_incr
|
||||
Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx)
|
||||
Matrix93 B, C;
|
||||
PreintegrationBase::update(measuredAcc, measuredOmega, deltaT,
|
||||
&D_incrR_integratedOmega, &F_9x9, &G1, &G2);
|
||||
&D_incrR_integratedOmega, &A, &B, &C);
|
||||
|
||||
// Update preintegrated measurements covariance: as in [2] we consider a first order propagation that
|
||||
// can be seen as a prediction phase in an EKF framework. In this implementation, contrarily to [2] we
|
||||
// consider the uncertainty of the bias selection and we keep correlation between biases and preintegrated measurements
|
||||
/* ----------------------------------------------------------------------------------------------------------------------- */
|
||||
// Update preintegrated measurements covariance: as in [2] we consider a first
|
||||
// order propagation that can be seen as a prediction phase in an EKF
|
||||
// framework. In this implementation, contrarily to [2] we consider the
|
||||
// uncertainty of the bias selection and we keep correlation between biases
|
||||
// and preintegrated measurements
|
||||
|
||||
// Single Jacobians to propagate covariance
|
||||
Matrix3 H_vel_biasacc = -dRij * deltaT;
|
||||
Matrix3 H_angles_biasomega = -D_incrR_integratedOmega * deltaT;
|
||||
|
||||
// overall Jacobian wrt preintegrated measurements (df/dx)
|
||||
Eigen::Matrix<double,15,15> F;
|
||||
Eigen::Matrix<double, 15, 15> F;
|
||||
F.setZero();
|
||||
F.block<9, 9>(0, 0) = F_9x9;
|
||||
F.block<9, 9>(0, 0) = A;
|
||||
F.block<3, 3>(0, 12) = H_angles_biasomega;
|
||||
F.block<3, 3>(6, 9) = H_vel_biasacc;
|
||||
F.block<6, 6>(9, 9) = I_6x6;
|
||||
|
||||
// first order uncertainty propagation
|
||||
// Optimized matrix multiplication (1/deltaT) * G * measurementCovariance * G.transpose()
|
||||
Eigen::Matrix<double,15,15> G_measCov_Gt;
|
||||
// Optimized matrix multiplication (1/deltaT) * G * measurementCovariance *
|
||||
// G.transpose()
|
||||
Eigen::Matrix<double, 15, 15> G_measCov_Gt;
|
||||
G_measCov_Gt.setZero(15, 15);
|
||||
|
||||
// BLOCK DIAGONAL TERMS
|
||||
D_t_t(&G_measCov_Gt) = deltaT * p().integrationCovariance;
|
||||
D_v_v(&G_measCov_Gt) = (1 / deltaT) * (H_vel_biasacc)
|
||||
* (p().accelerometerCovariance + p().biasAccOmegaInit.block<3, 3>(0, 0))
|
||||
* (H_vel_biasacc.transpose());
|
||||
D_R_R(&G_measCov_Gt) = (1 / deltaT) * (H_angles_biasomega)
|
||||
* (p().gyroscopeCovariance + p().biasAccOmegaInit.block<3, 3>(3, 3))
|
||||
* (H_angles_biasomega.transpose());
|
||||
D_v_v(&G_measCov_Gt) =
|
||||
(1 / deltaT) * (H_vel_biasacc) *
|
||||
(p().accelerometerCovariance + p().biasAccOmegaInit.block<3, 3>(0, 0)) *
|
||||
(H_vel_biasacc.transpose());
|
||||
D_R_R(&G_measCov_Gt) =
|
||||
(1 / deltaT) * (H_angles_biasomega) *
|
||||
(p().gyroscopeCovariance + p().biasAccOmegaInit.block<3, 3>(3, 3)) *
|
||||
(H_angles_biasomega.transpose());
|
||||
D_a_a(&G_measCov_Gt) = deltaT * p().biasAccCovariance;
|
||||
D_g_g(&G_measCov_Gt) = deltaT * p().biasOmegaCovariance;
|
||||
|
||||
// OFF BLOCK DIAGONAL TERMS
|
||||
Matrix3 temp = H_vel_biasacc * p().biasAccOmegaInit.block<3, 3>(3, 0)
|
||||
* H_angles_biasomega.transpose();
|
||||
Matrix3 temp = H_vel_biasacc * p().biasAccOmegaInit.block<3, 3>(3, 0) *
|
||||
H_angles_biasomega.transpose();
|
||||
D_v_R(&G_measCov_Gt) = temp;
|
||||
D_R_v(&G_measCov_Gt) = temp.transpose();
|
||||
preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G_measCov_Gt;
|
||||
|
|
|
|||
|
|
@ -124,17 +124,21 @@ public:
|
|||
//// return measurement - bias_gyro_temp - R_G_to_I * w_earth_rate_G;
|
||||
// }
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/// print with optional string
|
||||
void print(const std::string& s = "") const {
|
||||
// explicit printing for now.
|
||||
std::cout << s + ".Acc [" << biasAcc_.transpose() << "]" << std::endl;
|
||||
std::cout << s + ".Gyro [" << biasGyro_.transpose() << "]" << std::endl;
|
||||
/// ostream operator
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os,
|
||||
const ConstantBias& bias) {
|
||||
os << "acc = " << Point3(bias.accelerometer());
|
||||
os << " gyro = " << Point3(bias.gyroscope());
|
||||
return os;
|
||||
}
|
||||
|
||||
/// print with optional string
|
||||
void print(const std::string& s = "") const { std::cout << s << *this << std::endl; }
|
||||
|
||||
/** equality up to tolerance */
|
||||
inline bool equals(const ConstantBias& expected, double tol = 1e-5) const {
|
||||
return equal_with_abs_tol(biasAcc_, expected.biasAcc_, tol)
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -52,36 +52,33 @@ void PreintegratedImuMeasurements::resetIntegration() {
|
|||
//------------------------------------------------------------------------------
|
||||
void PreintegratedImuMeasurements::integrateMeasurement(
|
||||
const Vector3& measuredAcc, const Vector3& measuredOmega, double dt) {
|
||||
|
||||
static const Matrix93 Gi = (Matrix93() << Z_3x3, I_3x3, Z_3x3).finished();
|
||||
|
||||
// Update preintegrated measurements (also get Jacobian)
|
||||
Matrix9 F; // overall Jacobian wrt preintegrated measurements (df/dx)
|
||||
Matrix93 G1, G2;
|
||||
Matrix9 A; // overall Jacobian wrt preintegrated measurements (df/dx)
|
||||
Matrix93 B, C;
|
||||
Matrix3 D_incrR_integratedOmega;
|
||||
PreintegrationBase::update(measuredAcc, measuredOmega, dt,
|
||||
&D_incrR_integratedOmega, &F, &G1, &G2);
|
||||
&D_incrR_integratedOmega, &A, &B, &C);
|
||||
|
||||
// first order covariance propagation:
|
||||
// as in [2] we consider a first order propagation that can be seen as a prediction phase in EKF
|
||||
/* --------------------------------------------------------------------------------------------*/
|
||||
// preintMeasCov = F * preintMeasCov * F.transpose() + G * (1/deltaT) * measurementCovariance * G'
|
||||
// NOTE 1: (1/deltaT) allows to pass from continuous time noise to discrete time noise
|
||||
// measurementCovariance_discrete = measurementCovariance_contTime * (1/deltaT)
|
||||
#ifdef OLD_JACOBIAN_CALCULATION
|
||||
Matrix9 G;
|
||||
G << G1, Gi, G2;
|
||||
Matrix9 Cov;
|
||||
Cov << p().accelerometerCovariance / dt, Z_3x3, Z_3x3,
|
||||
Z_3x3, p().integrationCovariance * dt, Z_3x3,
|
||||
Z_3x3, Z_3x3, p().gyroscopeCovariance / dt;
|
||||
preintMeasCov_ = F * preintMeasCov_ * F.transpose() + G * Cov * G.transpose();
|
||||
#else
|
||||
preintMeasCov_ = F * preintMeasCov_ * F.transpose()
|
||||
+ Gi * (p().integrationCovariance * dt) * Gi.transpose() // NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt)
|
||||
+ G1 * (p().accelerometerCovariance / dt) * G1.transpose()
|
||||
+ G2 * (p().gyroscopeCovariance / dt) * G2.transpose();
|
||||
#endif
|
||||
// as in [2] we consider a first order propagation that can be seen as a
|
||||
// prediction phase in EKF
|
||||
|
||||
// propagate uncertainty
|
||||
// TODO(frank): use noiseModel routine so we can have arbitrary noise models.
|
||||
const Matrix3& aCov = p().accelerometerCovariance;
|
||||
const Matrix3& wCov = p().gyroscopeCovariance;
|
||||
const Matrix3& iCov = p().integrationCovariance;
|
||||
|
||||
// NOTE(luca): (1/dt) allows to pass from continuous time noise to discrete
|
||||
// time noise
|
||||
// measurementCovariance_discrete = measurementCovariance_contTime/dt
|
||||
preintMeasCov_ = A * preintMeasCov_ * A.transpose();
|
||||
preintMeasCov_.noalias() += B * (aCov / dt) * B.transpose();
|
||||
preintMeasCov_.noalias() += C * (wCov / dt) * C.transpose();
|
||||
|
||||
// NOTE(frank): (Gi*dt)*(C/dt)*(Gi'*dt)
|
||||
static const Matrix93 Gi = (Matrix93() << Z_3x3, I_3x3, Z_3x3).finished();
|
||||
preintMeasCov_.noalias() += Gi * (iCov * dt) * Gi.transpose();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
|
@ -125,17 +122,22 @@ gtsam::NonlinearFactor::shared_ptr ImuFactor::clone() const {
|
|||
gtsam::NonlinearFactor::shared_ptr(new This(*this)));
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
std::ostream& operator<<(std::ostream& os, const ImuFactor& f) {
|
||||
os << " preintegrated measurements:\n" << f._PIM_;
|
||||
;
|
||||
// Print standard deviations on covariance only
|
||||
os << " noise model sigmas: " << f.noiseModel_->sigmas().transpose();
|
||||
return os;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const {
|
||||
cout << s << "ImuFactor(" << keyFormatter(this->key1()) << ","
|
||||
<< keyFormatter(this->key2()) << "," << keyFormatter(this->key3()) << ","
|
||||
<< keyFormatter(this->key4()) << "," << keyFormatter(this->key5())
|
||||
<< ")\n";
|
||||
Base::print("");
|
||||
_PIM_.print(" preintegrated measurements:");
|
||||
// Print standard deviations on covariance only
|
||||
cout << " noise model sigmas: " << this->noiseModel_->sigmas().transpose()
|
||||
<< endl;
|
||||
cout << *this << endl;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -201,14 +201,13 @@ public:
|
|||
/// @return a deep copy of this factor
|
||||
virtual gtsam::NonlinearFactor::shared_ptr clone() const;
|
||||
|
||||
/** implement functions needed for Testable */
|
||||
|
||||
/// print
|
||||
/// @name Testable
|
||||
/// @{
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const ImuFactor&);
|
||||
virtual void print(const std::string& s, const KeyFormatter& keyFormatter =
|
||||
DefaultKeyFormatter) const;
|
||||
|
||||
/// equals
|
||||
virtual bool equals(const NonlinearFactor& expected, double tol = 1e-9) const;
|
||||
/// @}
|
||||
|
||||
/** Access the preintegrated measurements. */
|
||||
|
||||
|
|
|
|||
|
|
@ -74,11 +74,17 @@ Matrix7 NavState::matrix() const {
|
|||
return T;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
ostream& operator<<(ostream& os, const NavState& state) {
|
||||
os << "R:" << state.attitude();
|
||||
os << "p:" << state.position() << endl;
|
||||
os << "v:" << Point3(state.velocity()) << endl;
|
||||
return os;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
void NavState::print(const string& s) const {
|
||||
attitude().print(s + ".R");
|
||||
position().print(s + ".p");
|
||||
gtsam::print((Vector) v_, s + ".v");
|
||||
cout << s << *this << endl;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
|
@ -75,18 +75,9 @@ public:
  /// @name Component Access
  /// @{

  inline const Rot3& attitude() const {
    return R_;
  }
  inline const Point3& position() const {
    return t_;
  }
  inline const Velocity3& velocity() const {
    return v_;
  }
  const Rot3& attitude(OptionalJacobian<3, 9> H) const;
  const Point3& position(OptionalJacobian<3, 9> H) const;
  const Velocity3& velocity(OptionalJacobian<3, 9> H) const;
  const Rot3& attitude(OptionalJacobian<3, 9> H = boost::none) const;
  const Point3& position(OptionalJacobian<3, 9> H = boost::none) const;
  const Velocity3& velocity(OptionalJacobian<3, 9> H = boost::none) const;

  const Pose3 pose() const {
    return Pose3(attitude(), position());
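After this change the plain accessors and the Jacobian-returning overloads collapse into one signature with a defaulted OptionalJacobian. A hedged usage sketch follows; the GTSAM calls are the ones visible in this diff, while the state values and variable names are arbitrary.

#include <gtsam/navigation/NavState.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Example state built from a pose and a navigation-frame velocity.
  NavState state(Pose3(), Vector3(2, 0, 0));

  // Default argument: no Jacobian requested.
  const Rot3& R = state.attitude();

  // Same accessor, now also filling the 3x9 Jacobian of the component
  // with respect to the 9-dimensional NavState tangent space.
  Eigen::Matrix<double, 3, 9> H;
  const Point3& p = state.position(H);

  std::cout << "R:\n" << R.matrix() << "\np: " << p.vector().transpose()
            << "\nH:\n" << H << std::endl;
  return 0;
}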
@ -124,6 +115,9 @@ public:
|
|||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/// Output stream operator
|
||||
GTSAM_EXPORT friend std::ostream &operator<<(std::ostream &os, const NavState& state);
|
||||
|
||||
/// print
|
||||
void print(const std::string& s = "") const;
|
||||
|
||||
|
|
@ -229,6 +223,8 @@ public:
|
|||
false, OptionalJacobian<9, 9> H1 = boost::none,
|
||||
OptionalJacobian<9, 9> H2 = boost::none) const;
|
||||
|
||||
/// @}
|
||||
|
||||
private:
|
||||
/// @{
|
||||
/// serialization
|
||||
|
|
|
|||
|
|
@ -28,10 +28,17 @@ using namespace std;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
PreintegrationBase::PreintegrationBase(const boost::shared_ptr<Params>& p,
|
||||
const Bias& biasHat)
|
||||
: p_(p), biasHat_(biasHat), deltaTij_(0.0) {
|
||||
resetIntegration();
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
void PreintegrationBase::resetIntegration() {
|
||||
deltaTij_ = 0.0;
|
||||
deltaXij_ = NavState();
|
||||
deltaXij_ = TangentVector();
|
||||
delRdelBiasOmega_ = Z_3x3;
|
||||
delPdelBiasAcc_ = Z_3x3;
|
||||
delPdelBiasOmega_ = Z_3x3;
|
||||
|
|
@ -39,14 +46,20 @@ void PreintegrationBase::resetIntegration() {
|
|||
delVdelBiasOmega_ = Z_3x3;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
ostream& operator<<(ostream& os, const PreintegrationBase& pim) {
|
||||
os << " deltaTij " << pim.deltaTij_ << endl;
|
||||
os << " deltaRij " << Point3(pim.theta()) << endl;
|
||||
os << " deltaPij " << Point3(pim.deltaPij()) << endl;
|
||||
os << " deltaVij " << Point3(pim.deltaVij()) << endl;
|
||||
os << " gyrobias " << Point3(pim.biasHat_.gyroscope()) << endl;
|
||||
os << " acc_bias " << Point3(pim.biasHat_.accelerometer()) << endl;
|
||||
return os;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
void PreintegrationBase::print(const string& s) const {
|
||||
cout << s << endl;
|
||||
cout << " deltaTij [" << deltaTij_ << "]" << endl;
|
||||
cout << " deltaRij.ypr = (" << deltaRij().ypr().transpose() << ")" << endl;
|
||||
cout << " deltaPij [ " << deltaPij().transpose() << " ]" << endl;
|
||||
cout << " deltaVij [ " << deltaVij().transpose() << " ]" << endl;
|
||||
biasHat_.print(" biasHat");
|
||||
cout << s << *this << endl;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
|
|
@ -54,8 +67,8 @@ bool PreintegrationBase::equals(const PreintegrationBase& other,
|
|||
double tol) const {
|
||||
const bool params_match = p_->equals(*other.p_, tol);
|
||||
return params_match && fabs(deltaTij_ - other.deltaTij_) < tol
|
||||
&& deltaXij_.equals(other.deltaXij_, tol)
|
||||
&& biasHat_.equals(other.biasHat_, tol)
|
||||
&& equal_with_abs_tol(deltaXij_.vector(), other.deltaXij_.vector(), tol)
|
||||
&& equal_with_abs_tol(delRdelBiasOmega_, other.delRdelBiasOmega_, tol)
|
||||
&& equal_with_abs_tol(delPdelBiasAcc_, other.delPdelBiasAcc_, tol)
|
||||
&& equal_with_abs_tol(delPdelBiasOmega_, other.delPdelBiasOmega_, tol)
|
||||
|
|
@ -70,28 +83,25 @@ pair<Vector3, Vector3> PreintegrationBase::correctMeasurementsByBiasAndSensorPos
|
|||
OptionalJacobian<3, 3> D_correctedAcc_measuredOmega,
|
||||
OptionalJacobian<3, 3> D_correctedOmega_measuredOmega) const {
|
||||
|
||||
// Correct for bias in the sensor frame
|
||||
// Correct for bias in the sensor frame
|
||||
Vector3 j_correctedAcc = biasHat_.correctAccelerometer(j_measuredAcc);
|
||||
Vector3 j_correctedOmega = biasHat_.correctGyroscope(j_measuredOmega);
|
||||
|
||||
// Compensate for sensor-body displacement if needed: we express the quantities
|
||||
// (originally in the IMU frame) into the body frame
|
||||
// Equations below assume the "body" frame is the CG
|
||||
// Compensate for sensor-body displacement if needed: we express the quantities
|
||||
// (originally in the IMU frame) into the body frame
|
||||
// Equations below assume the "body" frame is the CG
|
||||
if (p().body_P_sensor) {
|
||||
// Correct omega to rotation rate vector in the body frame
|
||||
// Get sensor to body rotation matrix
|
||||
const Matrix3 bRs = p().body_P_sensor->rotation().matrix();
|
||||
j_correctedOmega = bRs * j_correctedOmega;
|
||||
|
||||
// Correct acceleration
|
||||
// Convert angular velocity and acceleration from sensor to body frame
|
||||
j_correctedOmega = bRs * j_correctedOmega;
|
||||
j_correctedAcc = bRs * j_correctedAcc;
|
||||
|
||||
// Jacobians
|
||||
if (D_correctedAcc_measuredAcc)
|
||||
*D_correctedAcc_measuredAcc = bRs;
|
||||
if (D_correctedAcc_measuredOmega)
|
||||
*D_correctedAcc_measuredOmega = Matrix3::Zero();
|
||||
if (D_correctedOmega_measuredOmega)
|
||||
*D_correctedOmega_measuredOmega = bRs;
|
||||
if (D_correctedAcc_measuredAcc) *D_correctedAcc_measuredAcc = bRs;
|
||||
if (D_correctedAcc_measuredOmega) *D_correctedAcc_measuredOmega = Z_3x3;
|
||||
if (D_correctedOmega_measuredOmega) *D_correctedOmega_measuredOmega = bRs;
|
||||
|
||||
// Centrifugal acceleration
|
||||
const Vector3 b_arm = p().body_P_sensor->translation().vector();
|
||||
|
|
@ -101,6 +111,7 @@ pair<Vector3, Vector3> PreintegrationBase::correctMeasurementsByBiasAndSensorPos
    const Matrix3 body_Omega_body = skewSymmetric(j_correctedOmega);
    const Vector3 b_velocity_bs = body_Omega_body * b_arm; // magnitude: omega * arm
    j_correctedAcc -= body_Omega_body * b_velocity_bs;

    // Update derivative: centrifugal causes the correlation between acc and omega!!!
    if (D_correctedAcc_measuredOmega) {
      double wdp = j_correctedOmega.dot(b_arm);
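In equation form, the correction performed by correctMeasurementsByBiasAndSensorPose amounts to the following, with R_bs the sensor-to-body rotation, b_a and b_omega the accelerometer and gyroscope biases, and r the lever arm b_arm; the last term is the centrifugal acceleration removed in the hunk above:

\begin{aligned}
\omega_b &= R_{bs}\,(\omega_{meas} - b_\omega) \\
a_b &= R_{bs}\,(a_{meas} - b_a) \;-\; \omega_b \times (\omega_b \times r)
\end{aligned}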
@ -111,63 +122,107 @@ pair<Vector3, Vector3> PreintegrationBase::correctMeasurementsByBiasAndSensorPos
|
|||
}
|
||||
}
|
||||
|
||||
// Do update in one fell swoop
|
||||
return make_pair(j_correctedAcc, j_correctedOmega);
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
NavState PreintegrationBase::updatedDeltaXij(const Vector3& j_measuredAcc,
|
||||
const Vector3& j_measuredOmega, const double dt,
|
||||
OptionalJacobian<9, 9> D_updated_current,
|
||||
OptionalJacobian<9, 3> D_updated_measuredAcc,
|
||||
OptionalJacobian<9, 3> D_updated_measuredOmega) const {
|
||||
// See extensive discussion in ImuFactor.lyx
|
||||
PreintegrationBase::TangentVector PreintegrationBase::UpdateEstimate(
|
||||
const Vector3& a_body, const Vector3& w_body, double dt,
|
||||
const TangentVector& zeta, OptionalJacobian<9, 9> A,
|
||||
OptionalJacobian<9, 3> B, OptionalJacobian<9, 3> C) {
|
||||
// Calculate exact mean propagation
|
||||
Matrix3 H;
|
||||
const Matrix3 R = Rot3::Expmap(zeta.theta(), H).matrix();
|
||||
const Matrix3 invH = H.inverse();
|
||||
const Vector3 a_nav = R * a_body;
|
||||
const double dt22 = 0.5 * dt * dt;
|
||||
|
||||
Vector3 j_correctedAcc, j_correctedOmega;
|
||||
Matrix3 D_correctedAcc_measuredAcc, //
|
||||
D_correctedAcc_measuredOmega, //
|
||||
D_correctedOmega_measuredOmega;
|
||||
bool needDerivs = D_updated_measuredAcc && D_updated_measuredOmega
|
||||
&& p().body_P_sensor;
|
||||
boost::tie(j_correctedAcc, j_correctedOmega) =
|
||||
correctMeasurementsByBiasAndSensorPose(j_measuredAcc, j_measuredOmega,
|
||||
(needDerivs ? &D_correctedAcc_measuredAcc : 0),
|
||||
(needDerivs ? &D_correctedAcc_measuredOmega : 0),
|
||||
(needDerivs ? &D_correctedOmega_measuredOmega : 0));
|
||||
// Do update in one fell swoop
|
||||
Matrix93 D_updated_correctedAcc, D_updated_correctedOmega;
|
||||
NavState updated = deltaXij_.update(j_correctedAcc, j_correctedOmega, dt,
|
||||
D_updated_current,
|
||||
(needDerivs ? D_updated_correctedAcc : D_updated_measuredAcc),
|
||||
(needDerivs ? D_updated_correctedOmega : D_updated_measuredOmega));
|
||||
if (needDerivs) {
|
||||
*D_updated_measuredAcc = D_updated_correctedAcc
|
||||
* D_correctedAcc_measuredAcc;
|
||||
*D_updated_measuredOmega = D_updated_correctedOmega
|
||||
* D_correctedOmega_measuredOmega;
|
||||
if (!p().body_P_sensor->translation().vector().isZero()) {
|
||||
*D_updated_measuredOmega += D_updated_correctedAcc
|
||||
* D_correctedAcc_measuredOmega;
|
||||
}
|
||||
  TangentVector zetaPlus(zeta.theta() + invH * w_body * dt,
                         zeta.position() + zeta.velocity() * dt + a_nav * dt22,
                         zeta.velocity() + a_nav * dt);

  if (A) {
    // First order (small angle) approximation of derivative of invH*w:
    const Matrix3 invHw_H_theta = skewSymmetric(-0.5 * w_body);

    // Exact derivative of R*a with respect to theta:
    const Matrix3 a_nav_H_theta = R * skewSymmetric(-a_body) * H;

    A->setIdentity();
    A->block<3, 3>(0, 0).noalias() += invHw_H_theta * dt;
    A->block<3, 3>(3, 0) = a_nav_H_theta * dt22;
    A->block<3, 3>(3, 6) = I_3x3 * dt;
    A->block<3, 3>(6, 0) = a_nav_H_theta * dt;
  }
  if (B) {
    B->block<3, 3>(0, 0) = Z_3x3;
    B->block<3, 3>(3, 0) = R * dt22;
    B->block<3, 3>(6, 0) = R * dt;
  }
  if (C) {
    C->block<3, 3>(0, 0) = invH * dt;
    C->block<3, 3>(3, 0) = Z_3x3;
    C->block<3, 3>(6, 0) = Z_3x3;
  }

  return zetaPlus;
}

//------------------------------------------------------------------------------
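The exact mean propagation carried out by UpdateEstimate above can be summarized as follows, writing zeta = (theta, p, v), R = Expmap(theta) with right Jacobian H, a_b and omega_b for the bias-corrected body-frame readings, and dt for the sample interval; A, B and C are the Jacobians of zeta+ with respect to zeta, a_b and omega_b filled in the three blocks:

\begin{aligned}
\theta^{+} &= \theta + H(\theta)^{-1}\,\omega_b\,\Delta t \\
p^{+}      &= p + v\,\Delta t + \tfrac{1}{2}\, R(\theta)\, a_b\, \Delta t^{2} \\
v^{+}      &= v + R(\theta)\, a_b\, \Delta t
\end{aligned}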
|
||||
PreintegrationBase::TangentVector PreintegrationBase::updatedDeltaXij(
|
||||
const Vector3& measuredAcc, const Vector3& measuredOmega, double dt,
|
||||
OptionalJacobian<9, 9> A, OptionalJacobian<9, 3> B,
|
||||
OptionalJacobian<9, 3> C) const {
|
||||
if (!p().body_P_sensor) {
|
||||
// Correct for bias in the sensor frame
|
||||
Vector3 correctedAcc = biasHat_.correctAccelerometer(measuredAcc);
|
||||
Vector3 correctedOmega = biasHat_.correctGyroscope(measuredOmega);
|
||||
|
||||
// Do update in one fell swoop
|
||||
return UpdateEstimate(correctedAcc, correctedOmega, dt, deltaXij_, A, B, C);
|
||||
} else {
|
||||
// More complicated derivatives in case of sensor displacement
|
||||
Vector3 correctedAcc, correctedOmega;
|
||||
Matrix3 D_correctedAcc_measuredAcc, D_correctedAcc_measuredOmega,
|
||||
D_correctedOmega_measuredOmega;
|
||||
boost::tie(correctedAcc, correctedOmega) =
|
||||
correctMeasurementsByBiasAndSensorPose(
|
||||
measuredAcc, measuredOmega, (B ? &D_correctedAcc_measuredAcc : 0),
|
||||
(C ? &D_correctedAcc_measuredOmega : 0),
|
||||
(C ? &D_correctedOmega_measuredOmega : 0));
|
||||
|
||||
// Do update in one fell swoop
|
||||
Matrix93 D_updated_correctedAcc, D_updated_correctedOmega;
|
||||
const PreintegrationBase::TangentVector updated =
|
||||
UpdateEstimate(correctedAcc, correctedOmega, dt, deltaXij_, A,
|
||||
((B || C) ? &D_updated_correctedAcc : 0),
|
||||
(C ? &D_updated_correctedOmega : 0));
|
||||
    if (B) *B = D_updated_correctedAcc* D_correctedAcc_measuredAcc;
    if (C) {
      *C = D_updated_correctedOmega* D_correctedOmega_measuredOmega;
      if (!p().body_P_sensor->translation().vector().isZero())
        *C += D_updated_correctedAcc* D_correctedAcc_measuredOmega;
    }
    return updated;
  }
  return updated;
}
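When body_P_sensor is set, the measurement Jacobians returned by updatedDeltaXij are composed by the chain rule, exactly as the lines above do; the extra term in C appears only for a nonzero lever arm, where omega leaks into the corrected acceleration through the centrifugal term:

B = \frac{\partial \zeta^{+}}{\partial a_{corr}} \frac{\partial a_{corr}}{\partial a_{meas}},
\qquad
C = \frac{\partial \zeta^{+}}{\partial \omega_{corr}} \frac{\partial \omega_{corr}}{\partial \omega_{meas}}
  + \frac{\partial \zeta^{+}}{\partial a_{corr}} \frac{\partial a_{corr}}{\partial \omega_{meas}}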
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
void PreintegrationBase::update(const Vector3& j_measuredAcc,
|
||||
const Vector3& j_measuredOmega, const double dt,
|
||||
Matrix3* D_incrR_integratedOmega, Matrix9* D_updated_current,
|
||||
Matrix93* D_updated_measuredAcc, Matrix93* D_updated_measuredOmega) {
|
||||
const Vector3& j_measuredOmega, double dt,
|
||||
Matrix3* D_incrR_integratedOmega, Matrix9* A,
|
||||
Matrix93* B, Matrix93* C) {
|
||||
// Save current rotation for updating Jacobians
|
||||
const Rot3 oldRij = deltaRij();
|
||||
|
||||
// Save current rotation for updating Jacobians
|
||||
const Rot3 oldRij = deltaXij_.attitude();
|
||||
|
||||
// Do update
|
||||
// Do update
|
||||
deltaTij_ += dt;
|
||||
deltaXij_ = updatedDeltaXij(j_measuredAcc, j_measuredOmega, dt,
|
||||
D_updated_current, D_updated_measuredAcc, D_updated_measuredOmega); // functional
|
||||
deltaXij_ = updatedDeltaXij(j_measuredAcc, j_measuredOmega, dt, A, B, C);
|
||||
|
||||
// Update Jacobians
|
||||
// TODO(frank): we are repeating some computation here: accessible in F ?
|
||||
// Update Jacobians
|
||||
// TODO(frank): we are repeating some computation here: accessible in A ?
|
||||
// Possibly: derivatives are just -B and -C ??
|
||||
Vector3 j_correctedAcc, j_correctedOmega;
|
||||
boost::tie(j_correctedAcc, j_correctedOmega) =
|
||||
correctMeasurementsByBiasAndSensorPose(j_measuredAcc, j_measuredOmega);
|
||||
|
|
@ -204,8 +259,8 @@ Vector9 PreintegrationBase::biasCorrectedDelta(
|
|||
|
||||
Vector9 xi;
|
||||
Matrix3 D_dR_correctedRij;
|
||||
// TODO(frank): could line below be simplified? It is equivalent to
|
||||
// LogMap(deltaRij_.compose(Expmap(biasInducedOmega)))
|
||||
// TODO(frank): could line below be simplified? It is equivalent to
|
||||
// LogMap(deltaRij_.compose(Expmap(biasInducedOmega)))
|
||||
NavState::dR(xi) = Rot3::Logmap(correctedRij, H ? &D_dR_correctedRij : 0);
|
||||
NavState::dP(xi) = deltaPij() + delPdelBiasAcc_ * biasIncr.accelerometer()
|
||||
+ delPdelBiasOmega_ * biasIncr.gyroscope();
|
||||
|
|
@ -224,29 +279,55 @@ Vector9 PreintegrationBase::biasCorrectedDelta(
|
|||
|
||||
//------------------------------------------------------------------------------
|
||||
NavState PreintegrationBase::predict(const NavState& state_i,
|
||||
const imuBias::ConstantBias& bias_i, OptionalJacobian<9, 9> H1,
|
||||
OptionalJacobian<9, 6> H2) const {
|
||||
// correct for bias
|
||||
const imuBias::ConstantBias& bias_i,
|
||||
OptionalJacobian<9, 9> H1,
|
||||
OptionalJacobian<9, 6> H2) const {
|
||||
// TODO(frank): make sure this stuff is still correct
|
||||
Matrix96 D_biasCorrected_bias;
|
||||
Vector9 biasCorrected = biasCorrectedDelta(bias_i,
|
||||
H2 ? &D_biasCorrected_bias : 0);
|
||||
Vector9 biasCorrected =
|
||||
biasCorrectedDelta(bias_i, H2 ? &D_biasCorrected_bias : 0);
|
||||
|
||||
// integrate on tangent space
|
||||
// Correct for initial velocity and gravity
|
||||
Matrix9 D_delta_state, D_delta_biasCorrected;
|
||||
Vector9 xi = state_i.correctPIM(biasCorrected, deltaTij_, p().n_gravity,
|
||||
p().omegaCoriolis, p().use2ndOrderCoriolis, H1 ? &D_delta_state : 0,
|
||||
H2 ? &D_delta_biasCorrected : 0);
|
||||
p().omegaCoriolis, p().use2ndOrderCoriolis,
|
||||
H1 ? &D_delta_state : 0,
|
||||
H2 ? &D_delta_biasCorrected : 0);
|
||||
|
||||
// Use retract to get back to NavState manifold
|
||||
// Use retract to get back to NavState manifold
|
||||
Matrix9 D_predict_state, D_predict_delta;
|
||||
NavState state_j = state_i.retract(xi, D_predict_state, D_predict_delta);
|
||||
if (H1)
|
||||
*H1 = D_predict_state + D_predict_delta * D_delta_state;
|
||||
if (H2)
|
||||
*H2 = D_predict_delta * D_delta_biasCorrected * D_biasCorrected_bias;
|
||||
if (H1) *H1 = D_predict_state + D_predict_delta* D_delta_state;
|
||||
if (H2) *H2 = D_predict_delta* D_delta_biasCorrected * D_biasCorrected_bias;
|
||||
return state_j;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
Vector9 PreintegrationBase::computeError(const NavState& state_i,
|
||||
const NavState& state_j,
|
||||
const imuBias::ConstantBias& bias_i,
|
||||
OptionalJacobian<9, 9> H1,
|
||||
OptionalJacobian<9, 9> H2,
|
||||
OptionalJacobian<9, 6> H3) const {
|
||||
// Predict state at time j
|
||||
Matrix9 D_predict_state_i;
|
||||
Matrix96 D_predict_bias_i;
|
||||
NavState predictedState_j = predict(
|
||||
state_i, bias_i, H1 ? &D_predict_state_i : 0, H3 ? &D_predict_bias_i : 0);
|
||||
|
||||
// Calculate error
|
||||
Matrix9 D_error_state_j, D_error_predict;
|
||||
Vector9 error =
|
||||
state_j.localCoordinates(predictedState_j, H2 ? &D_error_state_j : 0,
|
||||
H1 || H3 ? &D_error_predict : 0);
|
||||
|
||||
if (H1) *H1 << D_error_predict* D_predict_state_i;
|
||||
if (H2) *H2 << D_error_state_j;
|
||||
if (H3) *H3 << D_error_predict* D_predict_bias_i;
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
//------------------------------------------------------------------------------
|
||||
Vector9 PreintegrationBase::computeErrorAndJacobians(const Pose3& pose_i,
|
||||
const Vector3& vel_i, const Pose3& pose_j, const Vector3& vel_j,
|
||||
|
|
@ -254,38 +335,25 @@ Vector9 PreintegrationBase::computeErrorAndJacobians(const Pose3& pose_i,
|
|||
OptionalJacobian<9, 3> H2, OptionalJacobian<9, 6> H3,
|
||||
OptionalJacobian<9, 3> H4, OptionalJacobian<9, 6> H5) const {
|
||||
|
||||
// Note that derivative of constructors below is not identity for velocity, but
|
||||
// a 9*3 matrix == Z_3x3, Z_3x3, state.R().transpose()
|
||||
// Note that derivative of constructors below is not identity for velocity, but
|
||||
// a 9*3 matrix == Z_3x3, Z_3x3, state.R().transpose()
|
||||
NavState state_i(pose_i, vel_i);
|
||||
NavState state_j(pose_j, vel_j);
|
||||
|
||||
/// Predict state at time j
|
||||
Matrix99 D_predict_state_i;
|
||||
Matrix96 D_predict_bias_i;
|
||||
NavState predictedState_j = predict(state_i, bias_i,
|
||||
H1 || H2 ? &D_predict_state_i : 0, H5 ? &D_predict_bias_i : 0);
|
||||
// Predict state at time j
|
||||
Matrix9 D_error_state_i, D_error_state_j;
|
||||
Vector9 error = computeError(state_i, state_j, bias_i,
|
||||
H1 || H2 ? &D_error_state_i : 0, H3 || H4 ? &D_error_state_j : 0, H5);
|
||||
|
||||
Matrix9 D_error_state_j, D_error_predict;
|
||||
Vector9 error = state_j.localCoordinates(predictedState_j,
|
||||
H3 || H4 ? &D_error_state_j : 0, H1 || H2 || H5 ? &D_error_predict : 0);
|
||||
|
||||
// Separate out derivatives in terms of 5 arguments
|
||||
// Note that doing so requires special treatment of velocities, as when treated as
|
||||
// separate variables the retract applied will not be the semi-direct product in NavState
|
||||
// Instead, the velocities in nav are updated using a straight addition
|
||||
// This difference is accounted for by the R().transpose calls below
|
||||
if (H1)
|
||||
*H1 << D_error_predict * D_predict_state_i.leftCols<6>();
|
||||
if (H2)
|
||||
*H2
|
||||
<< D_error_predict * D_predict_state_i.rightCols<3>()
|
||||
* state_i.R().transpose();
|
||||
if (H3)
|
||||
*H3 << D_error_state_j.leftCols<6>();
|
||||
if (H4)
|
||||
*H4 << D_error_state_j.rightCols<3>() * state_j.R().transpose();
|
||||
if (H5)
|
||||
*H5 << D_error_predict * D_predict_bias_i;
|
||||
// Separate out derivatives in terms of 5 arguments
|
||||
// Note that doing so requires special treatment of velocities, as when treated as
|
||||
// separate variables the retract applied will not be the semi-direct product in NavState
|
||||
// Instead, the velocities in nav are updated using a straight addition
|
||||
// This difference is accounted for by the R().transpose calls below
|
||||
if (H1) *H1 << D_error_state_i.leftCols<6>();
|
||||
if (H2) *H2 << D_error_state_i.rightCols<3>() * state_i.R().transpose();
|
||||
if (H3) *H3 << D_error_state_j.leftCols<6>();
|
||||
if (H4) *H4 << D_error_state_j.rightCols<3>() * state_j.R().transpose();
|
||||
|
||||
return error;
|
||||
}
|
||||
|
|
@ -307,5 +375,4 @@ PoseVelocityBias PreintegrationBase::predict(const Pose3& pose_i,
|
|||
#endif
|
||||
//------------------------------------------------------------------------------
|
||||
|
||||
}
|
||||
/// namespace gtsam
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -24,6 +24,9 @@
|
|||
#include <gtsam/navigation/PreintegrationParams.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
#include <gtsam/navigation/ImuBias.h>
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
|
||||
#include <iosfwd>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
|
@ -53,15 +56,56 @@ struct PoseVelocityBias {
|
|||
* to access, print, and compare them.
|
||||
*/
|
||||
class PreintegrationBase {
|
||||
public:
|
||||
typedef imuBias::ConstantBias Bias;
|
||||
typedef PreintegrationParams Params;
|
||||
|
||||
protected:
|
||||
/// The IMU is integrated in the tangent space, represented by a Vector9
|
||||
/// This small inner class provides some convenient constructors and efficient
|
||||
/// access to the orientation, position, and velocity components
|
||||
  class TangentVector {
    Vector9 v_;
    typedef const Vector9 constV9;

   public:
    TangentVector() { v_.setZero(); }
    TangentVector(const Vector9& v) : v_(v) {}
    TangentVector(const Vector3& theta, const Vector3& pos,
                  const Vector3& vel) {
      this->theta() = theta;
      this->position() = pos;
      this->velocity() = vel;
    }

    const Vector9& vector() const { return v_; }

    Eigen::Block<Vector9, 3, 1> theta() { return v_.segment<3>(0); }
    Eigen::Block<constV9, 3, 1> theta() const { return v_.segment<3>(0); }

    Eigen::Block<Vector9, 3, 1> position() { return v_.segment<3>(3); }
    Eigen::Block<constV9, 3, 1> position() const { return v_.segment<3>(3); }

    Eigen::Block<Vector9, 3, 1> velocity() { return v_.segment<3>(6); }
    Eigen::Block<constV9, 3, 1> velocity() const { return v_.segment<3>(6); }

   private:
    /** Serialization function */
    friend class boost::serialization::access;
    template <class ARCHIVE>
    void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
      namespace bs = ::boost::serialization;
      ar & bs::make_nvp("v_", bs::make_array(v_.data(), v_.size()));
    }
  };
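TangentVector is simply a Vector9 with named views onto its three 3-vector segments. The standalone struct below re-creates that Eigen::Block accessor pattern for illustration only; it is a hypothetical example, not part of GTSAM.

#include <Eigen/Dense>
#include <iostream>

using Vector9 = Eigen::Matrix<double, 9, 1>;

// Hypothetical re-creation of the named-segment pattern used by TangentVector.
struct Tangent9 {
  Vector9 v = Vector9::Zero();

  // Mutable views: writing through them writes into v directly.
  Eigen::Block<Vector9, 3, 1> theta() { return v.segment<3>(0); }
  Eigen::Block<Vector9, 3, 1> position() { return v.segment<3>(3); }
  Eigen::Block<Vector9, 3, 1> velocity() { return v.segment<3>(6); }
};

int main() {
  Tangent9 zeta;
  zeta.theta() << 0.01, 0.02, 0.03;  // rotation part
  zeta.position() << 1.0, 2.0, 3.0;  // position part
  zeta.velocity() << 0.1, 0.0, 0.0;  // velocity part
  std::cout << "zeta = " << zeta.v.transpose() << std::endl;
  return 0;
}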
|
||||
|
||||
protected:
|
||||
|
||||
/// Parameters. Declared mutable only for deprecated predict method.
|
||||
/// TODO(frank): make const once deprecated method is removed
|
||||
#ifdef ALLOW_DEPRECATED_IN_GTSAM4
|
||||
mutable
|
||||
#endif
|
||||
boost::shared_ptr<PreintegrationParams> p_;
|
||||
boost::shared_ptr<Params> p_;
|
||||
|
||||
/// Acceleration and gyro bias used for preintegration
|
||||
imuBias::ConstantBias biasHat_;
|
||||
|
|
@ -74,7 +118,8 @@ protected:
|
|||
* Note: relative position does not take into account velocity at time i, see deltap+, in [2]
|
||||
* Note: velocity is now also in frame i, as opposed to deltaVij in [2]
|
||||
*/
|
||||
NavState deltaXij_;
|
||||
/// Current estimate of deltaXij_k
|
||||
TangentVector deltaXij_;
|
||||
|
||||
Matrix3 delRdelBiasOmega_; ///< Jacobian of preintegrated rotation w.r.t. angular rate bias
|
||||
Matrix3 delPdelBiasAcc_; ///< Jacobian of preintegrated position w.r.t. acceleration bias
|
||||
|
|
@ -88,7 +133,6 @@ protected:
|
|||
}
|
||||
|
||||
public:
|
||||
|
||||
/// @name Constructors
|
||||
/// @{
|
||||
|
||||
|
|
@ -97,23 +141,20 @@ public:
|
|||
* @param p Parameters, typically fixed in a single application
|
||||
* @param bias Current estimate of acceleration and rotation rate biases
|
||||
*/
|
||||
PreintegrationBase(const boost::shared_ptr<PreintegrationParams>& p,
|
||||
const imuBias::ConstantBias& biasHat = imuBias::ConstantBias()) :
|
||||
p_(p), biasHat_(biasHat) {
|
||||
resetIntegration();
|
||||
}
|
||||
PreintegrationBase(const boost::shared_ptr<Params>& p,
|
||||
const imuBias::ConstantBias& biasHat = imuBias::ConstantBias());
|
||||
|
||||
/**
|
||||
* Constructor which takes in all members for generic construction
|
||||
*/
|
||||
PreintegrationBase(const boost::shared_ptr<PreintegrationParams>& p, const imuBias::ConstantBias& biasHat,
|
||||
double deltaTij, const NavState& deltaXij, const Matrix3& delPdelBiasAcc,
|
||||
PreintegrationBase(const boost::shared_ptr<Params>& p, const imuBias::ConstantBias& biasHat,
|
||||
double deltaTij, const TangentVector& zeta, const Matrix3& delPdelBiasAcc,
|
||||
const Matrix3& delPdelBiasOmega, const Matrix3& delVdelBiasAcc,
|
||||
const Matrix3& delVdelBiasOmega)
|
||||
: p_(p),
|
||||
biasHat_(biasHat),
|
||||
deltaTij_(deltaTij),
|
||||
deltaXij_(deltaXij),
|
||||
deltaXij_(zeta),
|
||||
delPdelBiasAcc_(delPdelBiasAcc),
|
||||
delPdelBiasOmega_(delPdelBiasOmega),
|
||||
delVdelBiasAcc_(delVdelBiasAcc),
|
||||
|
|
@ -125,69 +166,55 @@ public:
|
|||
/// Re-initialize PreintegratedMeasurements
|
||||
void resetIntegration();
|
||||
|
||||
/// check parameters equality: checks whether shared pointer points to same PreintegrationParams object.
|
||||
/// check parameters equality: checks whether shared pointer points to same Params object.
|
||||
bool matchesParamsWith(const PreintegrationBase& other) const {
|
||||
return p_ == other.p_;
|
||||
}
|
||||
|
||||
/// shared pointer to params
|
||||
const boost::shared_ptr<PreintegrationParams>& params() const {
|
||||
const boost::shared_ptr<Params>& params() const {
|
||||
return p_;
|
||||
}
|
||||
|
||||
/// const reference to params
|
||||
const PreintegrationParams& p() const {
|
||||
return *boost::static_pointer_cast<PreintegrationParams>(p_);
|
||||
const Params& p() const {
|
||||
return *boost::static_pointer_cast<Params>(p_);
|
||||
}
|
||||
|
||||
#ifdef ALLOW_DEPRECATED_IN_GTSAM4
|
||||
void set_body_P_sensor(const Pose3& body_P_sensor) {
|
||||
p_->body_P_sensor = body_P_sensor;
|
||||
}
|
||||
/// @}
|
||||
#endif
|
||||
/// @}
|
||||
|
||||
/// @name Instance variables access
|
||||
/// @{
|
||||
const NavState& deltaXij() const {
|
||||
return deltaXij_;
|
||||
}
|
||||
const double& deltaTij() const {
|
||||
return deltaTij_;
|
||||
}
|
||||
const Rot3& deltaRij() const {
|
||||
return deltaXij_.attitude();
|
||||
}
|
||||
Vector3 deltaPij() const {
|
||||
return deltaXij_.position().vector();
|
||||
}
|
||||
Vector3 deltaVij() const {
|
||||
return deltaXij_.velocity();
|
||||
}
|
||||
const imuBias::ConstantBias& biasHat() const {
|
||||
return biasHat_;
|
||||
}
|
||||
const Matrix3& delRdelBiasOmega() const {
|
||||
return delRdelBiasOmega_;
|
||||
}
|
||||
const Matrix3& delPdelBiasAcc() const {
|
||||
return delPdelBiasAcc_;
|
||||
}
|
||||
const Matrix3& delPdelBiasOmega() const {
|
||||
return delPdelBiasOmega_;
|
||||
}
|
||||
const Matrix3& delVdelBiasAcc() const {
|
||||
return delVdelBiasAcc_;
|
||||
}
|
||||
const Matrix3& delVdelBiasOmega() const {
|
||||
return delVdelBiasOmega_;
|
||||
}
|
||||
const imuBias::ConstantBias& biasHat() const { return biasHat_; }
|
||||
const double& deltaTij() const { return deltaTij_; }
|
||||
|
||||
const Vector9& zeta() const { return deltaXij_.vector(); }
|
||||
|
||||
Vector3 theta() const { return deltaXij_.theta(); }
|
||||
Vector3 deltaPij() const { return deltaXij_.position(); }
|
||||
Vector3 deltaVij() const { return deltaXij_.velocity(); }
|
||||
|
||||
Rot3 deltaRij() const { return Rot3::Expmap(deltaXij_.theta()); }
|
||||
NavState deltaXij() const { return NavState::Retract(deltaXij_.vector()); }
|
||||
|
||||
const Matrix3& delRdelBiasOmega() const { return delRdelBiasOmega_; }
|
||||
const Matrix3& delPdelBiasAcc() const { return delPdelBiasAcc_; }
|
||||
const Matrix3& delPdelBiasOmega() const { return delPdelBiasOmega_; }
|
||||
const Matrix3& delVdelBiasAcc() const { return delVdelBiasAcc_; }
|
||||
const Matrix3& delVdelBiasOmega() const { return delVdelBiasOmega_; }
|
||||
|
||||
// Exposed for MATLAB
|
||||
Vector6 biasHatVector() const {
|
||||
return biasHat_.vector();
|
||||
}
|
||||
Vector6 biasHatVector() const { return biasHat_.vector(); }
|
||||
/// @}
|
||||
|
||||
/// @name Testable
|
||||
/// @{
|
||||
GTSAM_EXPORT friend std::ostream& operator<<(std::ostream& os, const PreintegrationBase& pim);
|
||||
void print(const std::string& s) const;
|
||||
bool equals(const PreintegrationBase& other, double tol) const;
|
||||
/// @}
|
||||
|
|
@ -204,19 +231,28 @@ public:
|
|||
OptionalJacobian<3, 3> D_correctedAcc_measuredOmega = boost::none,
|
||||
OptionalJacobian<3, 3> D_correctedOmega_measuredOmega = boost::none) const;
|
||||
|
||||
// Update integrated vector on tangent manifold zeta with acceleration
|
||||
// readings a_body and gyro readings w_body, bias-corrected in body frame.
|
||||
static TangentVector UpdateEstimate(const Vector3& a_body,
|
||||
const Vector3& w_body, double dt,
|
||||
const TangentVector& zeta,
|
||||
OptionalJacobian<9, 9> A = boost::none,
|
||||
OptionalJacobian<9, 3> B = boost::none,
|
||||
OptionalJacobian<9, 3> C = boost::none);
|
||||
|
||||
/// Calculate the updated preintegrated measurement, does not modify
|
||||
/// It takes measured quantities in the j frame
|
||||
NavState updatedDeltaXij(const Vector3& j_measuredAcc,
|
||||
const Vector3& j_measuredOmega, const double dt,
|
||||
OptionalJacobian<9, 9> D_updated_current = boost::none,
|
||||
OptionalJacobian<9, 3> D_updated_measuredAcc = boost::none,
|
||||
OptionalJacobian<9, 3> D_updated_measuredOmega = boost::none) const;
|
||||
PreintegrationBase::TangentVector updatedDeltaXij(
|
||||
const Vector3& j_measuredAcc, const Vector3& j_measuredOmega, double dt,
|
||||
OptionalJacobian<9, 9> A = boost::none,
|
||||
OptionalJacobian<9, 3> B = boost::none,
|
||||
OptionalJacobian<9, 3> C = boost::none) const;
|
||||
|
||||
/// Update preintegrated measurements and get derivatives
|
||||
/// It takes measured quantities in the j frame
|
||||
void update(const Vector3& j_measuredAcc, const Vector3& j_measuredOmega,
|
||||
const double deltaT, Matrix3* D_incrR_integratedOmega, Matrix9* D_updated_current,
|
||||
Matrix93* D_udpated_measuredAcc, Matrix93* D_updated_measuredOmega);
|
||||
const double deltaT, Matrix3* D_incrR_integratedOmega, Matrix9* A,
|
||||
Matrix93* B, Matrix93* C);
|
||||
|
||||
/// Given the estimate of the bias, return a NavState tangent vector
|
||||
/// summarizing the preintegrated IMU measurements so far
|
||||
|
|
@ -225,8 +261,14 @@ public:
|
|||
|
||||
/// Predict state at time j
|
||||
NavState predict(const NavState& state_i, const imuBias::ConstantBias& bias_i,
|
||||
OptionalJacobian<9, 9> H1 = boost::none, OptionalJacobian<9, 6> H2 =
|
||||
boost::none) const;
|
||||
OptionalJacobian<9, 9> H1 = boost::none,
|
||||
OptionalJacobian<9, 6> H2 = boost::none) const;
|
||||
|
||||
/// Calculate error given navStates
|
||||
Vector9 computeError(const NavState& state_i, const NavState& state_j,
|
||||
const imuBias::ConstantBias& bias_i,
|
||||
OptionalJacobian<9, 9> H1, OptionalJacobian<9, 9> H2,
|
||||
OptionalJacobian<9, 6> H3) const;
|
||||
|
||||
/// Compute errors w.r.t. preintegrated measurements and jacobians wrt pose_i, vel_i, bias_i, pose_j, bias_j
|
||||
Vector9 computeErrorAndJacobians(const Pose3& pose_i, const Vector3& vel_i,
|
||||
|
|
|
|||
|
|
@ -17,7 +17,7 @@
|
|||
|
||||
#pragma once
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/navigation/NavState.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
|
|
@ -34,6 +34,7 @@ class Scenario {
|
|||
// Derived quantities:
|
||||
|
||||
Rot3 rotation(double t) const { return pose(t).rotation(); }
|
||||
NavState navState(double t) const { return NavState(pose(t), velocity_n(t)); }
|
||||
|
||||
Vector3 velocity_b(double t) const {
|
||||
const Rot3 nRb = rotation(t);
|
||||
|
|
@ -46,13 +47,18 @@ class Scenario {
|
|||
}
|
||||
};
|
||||
|
||||
/// Scenario with constant twist 3D trajectory.
/**
 * Scenario with constant twist 3D trajectory.
 * Note that a plane flying level does not feel any acceleration: gravity is
 * being perfectly compensated by the lift generated by the wings, and drag is
 * compensated by thrust. The accelerometer will add the gravity component back
 * in, as modeled by the specificForce method in ScenarioRunner.
 */
class ConstantTwistScenario : public Scenario {
 public:
  /// Construct scenario with constant twist [w,v]
  ConstantTwistScenario(const Vector3& w, const Vector3& v)
      : twist_((Vector6() << w, v).finished()),
        a_b_(twist_.head<3>().cross(twist_.tail<3>())) {}
      : twist_((Vector6() << w, v).finished()), a_b_(w.cross(v)) {}

  Pose3 pose(double t) const override { return Pose3::Expmap(twist_ * t); }
  Vector3 omega_b(double t) const override { return twist_.head<3>(); }
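A hedged usage sketch of ConstantTwistScenario follows; the spin rate and query time are arbitrary. Because pose(t) is Expmap(twist * t), a pure yaw rate traces a rotation in place.

#include <gtsam/navigation/Scenario.h>
#include <cmath>
#include <iostream>

using namespace gtsam;

int main() {
  // 6 deg/s yaw rate, no linear velocity: the body spins in place.
  const double w = 6.0 * M_PI / 180.0;
  ConstantTwistScenario scenario(Vector3(0, 0, w), Vector3(0, 0, 0));

  const double T = 10.0;                  // seconds
  Pose3 pose = scenario.pose(T);          // Expmap(twist * T)
  Vector3 omega_b = scenario.omega_b(T);  // constant angular velocity

  std::cout << "attitude (xyz): " << pose.rotation().xyz().transpose()
            << "\nomega_b: " << omega_b.transpose() << std::endl;
  return 0;
}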
|
||||
|
|
|
|||
|
|
@ -21,14 +21,16 @@
|
|||
#include <boost/assign.hpp>
|
||||
#include <cmath>
|
||||
|
||||
using namespace std;
|
||||
using namespace boost::assign;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
static double intNoiseVar = 0.0000001;
|
||||
static const Matrix3 kIntegrationErrorCovariance = intNoiseVar * I_3x3;
|
||||
|
||||
PreintegratedImuMeasurements ScenarioRunner::integrate(
|
||||
double T, const imuBias::ConstantBias& estimatedBias,
|
||||
bool corrupted) const {
|
||||
double T, const Bias& estimatedBias, bool corrupted) const {
|
||||
PreintegratedImuMeasurements pim(p_, estimatedBias);
|
||||
|
||||
const double dt = imuSampleTime();
|
||||
|
|
@ -45,37 +47,57 @@ PreintegratedImuMeasurements ScenarioRunner::integrate(
|
|||
return pim;
|
||||
}
|
||||
|
||||
NavState ScenarioRunner::predict(
|
||||
const PreintegratedImuMeasurements& pim,
|
||||
const imuBias::ConstantBias& estimatedBias) const {
|
||||
NavState ScenarioRunner::predict(const PreintegratedImuMeasurements& pim,
|
||||
const Bias& estimatedBias) const {
|
||||
const NavState state_i(scenario_->pose(0), scenario_->velocity_n(0));
|
||||
return pim.predict(state_i, estimatedBias);
|
||||
}
|
||||
|
||||
Matrix6 ScenarioRunner::estimatePoseCovariance(
|
||||
double T, size_t N, const imuBias::ConstantBias& estimatedBias) const {
|
||||
gttic_(estimatePoseCovariance);
|
||||
Matrix9 ScenarioRunner::estimateCovariance(double T, size_t N,
|
||||
const Bias& estimatedBias) const {
|
||||
gttic_(estimateCovariance);
|
||||
|
||||
// Get prediction from ground truth measurements
|
||||
Pose3 prediction = predict(integrate(T)).pose();
|
||||
NavState prediction = predict(integrate(T));
|
||||
|
||||
// Sample !
|
||||
Matrix samples(9, N);
|
||||
Vector6 sum = Vector6::Zero();
|
||||
Vector9 sum = Vector9::Zero();
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
Pose3 sampled = predict(integrate(T, estimatedBias, true)).pose();
|
||||
Vector6 xi = sampled.localCoordinates(prediction);
|
||||
auto pim = integrate(T, estimatedBias, true);
|
||||
NavState sampled = predict(pim);
|
||||
Vector9 xi = sampled.localCoordinates(prediction);
|
||||
samples.col(i) = xi;
|
||||
sum += xi;
|
||||
}
|
||||
|
||||
  // Compute MC covariance
  Vector9 sampleMean = sum / N;
  Matrix9 Q;
  Q.setZero();
  for (size_t i = 0; i < N; i++) {
    Vector9 xi = samples.col(i) - sampleMean;
    Q += xi * xi.transpose();
  }

  return Q / (N - 1);
}
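The Monte Carlo estimate assembled above is the standard unbiased sample covariance of the 9-dimensional local-coordinate errors xi_i between each noisy prediction and the noise-free one:

\hat{Q} = \frac{1}{N-1} \sum_{i=1}^{N} (\xi_i - \bar{\xi})(\xi_i - \bar{\xi})^{\mathsf T},
\qquad
\bar{\xi} = \frac{1}{N} \sum_{i=1}^{N} \xi_i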
|
||||
|
||||
Matrix6 ScenarioRunner::estimateNoiseCovariance(size_t N) const {
|
||||
Matrix samples(6, N);
|
||||
Vector6 sum = Vector6::Zero();
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
samples.col(i) << accSampler_.sample() / sqrt_dt_,
|
||||
gyroSampler_.sample() / sqrt_dt_;
|
||||
sum += samples.col(i);
|
||||
}
|
||||
|
||||
// Compute MC covariance
|
||||
Vector6 sampleMean = sum / N;
|
||||
Matrix6 Q;
|
||||
Q.setZero();
|
||||
for (size_t i = 0; i < N; i++) {
|
||||
Vector6 xi = samples.col(i);
|
||||
xi -= sampleMean;
|
||||
Vector6 xi = samples.col(i) - sampleMean;
|
||||
Q += xi * xi.transpose();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -22,20 +22,42 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
// Convert covariance to diagonal noise model, if possible, otherwise throw
static noiseModel::Diagonal::shared_ptr Diagonal(const Matrix& covariance) {
  bool smart = true;
  auto model = noiseModel::Gaussian::Covariance(covariance, smart);
  auto diagonal = boost::dynamic_pointer_cast<noiseModel::Diagonal>(model);
  if (!diagonal)
    throw std::invalid_argument("ScenarioRunner::Diagonal: not a diagonal");
  return diagonal;
}
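A hedged sketch of what this helper relies on (the covariance values below are made up): with smart set to true, Gaussian::Covariance returns a Diagonal model whenever the covariance has no off-diagonal terms, so the cast succeeds; a dense covariance would leave a full Gaussian model and trigger the throw.

#include <gtsam/linear/NoiseModel.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Diagonal covariance: the "smart" factory yields a Diagonal model.
  Matrix3 cov = Vector3(0.01, 0.01, 0.0004).asDiagonal();
  auto model = noiseModel::Gaussian::Covariance(cov, true);
  auto diagonal = boost::dynamic_pointer_cast<noiseModel::Diagonal>(model);
  std::cout << "is diagonal: " << std::boolalpha << bool(diagonal) << std::endl;

  // Sigmas are the square roots of the diagonal entries.
  if (diagonal)
    std::cout << "sigmas: " << diagonal->sigmas().transpose() << std::endl;
  return 0;
}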
|
||||
|
||||
/*
|
||||
* Simple class to test navigation scenarios.
|
||||
* Takes a trajectory scenario as input, and can generate IMU measurements
|
||||
*/
|
||||
class ScenarioRunner {
|
||||
public:
|
||||
typedef boost::shared_ptr<PreintegrationParams> SharedParams;
|
||||
typedef imuBias::ConstantBias Bias;
|
||||
typedef boost::shared_ptr<PreintegratedImuMeasurements::Params> SharedParams;
|
||||
|
||||
private:
|
||||
const Scenario* scenario_;
|
||||
const SharedParams p_;
|
||||
const double imuSampleTime_, sqrt_dt_;
|
||||
const Bias estimatedBias_;
|
||||
|
||||
// Create two samplers for acceleration and omega noise
|
||||
mutable Sampler gyroSampler_, accSampler_;
|
||||
|
||||
public:
|
||||
ScenarioRunner(const Scenario* scenario, const SharedParams& p,
|
||||
double imuSampleTime = 1.0 / 100.0,
|
||||
const imuBias::ConstantBias& bias = imuBias::ConstantBias())
|
||||
double imuSampleTime = 1.0 / 100.0, const Bias& bias = Bias())
|
||||
: scenario_(scenario),
|
||||
p_(p),
|
||||
imuSampleTime_(imuSampleTime),
|
||||
bias_(bias),
|
||||
sqrt_dt_(std::sqrt(imuSampleTime)),
|
||||
estimatedBias_(bias),
|
||||
// NOTE(duy): random seeds that work well:
|
||||
gyroSampler_(Diagonal(p->gyroscopeCovariance), 10),
|
||||
accSampler_(Diagonal(p->accelerometerCovariance), 29284) {}
|
||||
|
|
@ -57,59 +79,31 @@ class ScenarioRunner {
|
|||
|
||||
  // versions corrupted by bias and noise
  Vector3 measuredAngularVelocity(double t) const {
    return actualAngularVelocity(t) + bias_.gyroscope() +
           gyroSampler_.sample() / std::sqrt(imuSampleTime_);
    return actualAngularVelocity(t) + estimatedBias_.gyroscope() +
           gyroSampler_.sample() / sqrt_dt_;
  }
  Vector3 measuredSpecificForce(double t) const {
    return actualSpecificForce(t) + bias_.accelerometer() +
           accSampler_.sample() / std::sqrt(imuSampleTime_);
    return actualSpecificForce(t) + estimatedBias_.accelerometer() +
           accSampler_.sample() / sqrt_dt_;
  }
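The division by sqrt_dt_ in the two methods above is the usual continuous-to-discrete noise scaling: if Sigma is a continuous-time noise density, a sample held over Delta t has covariance Sigma / Delta t, so standard deviations scale by one over the square root of Delta t:

\sigma_{discrete} = \frac{\sigma_{continuous}}{\sqrt{\Delta t}},
\qquad
\Sigma_{discrete} = \frac{\Sigma_{continuous}}{\Delta t}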
|
||||
|
||||
const double& imuSampleTime() const { return imuSampleTime_; }
|
||||
|
||||
/// Integrate measurements for T seconds into a PIM
|
||||
PreintegratedImuMeasurements integrate(
|
||||
double T,
|
||||
const imuBias::ConstantBias& estimatedBias = imuBias::ConstantBias(),
|
||||
bool corrupted = false) const;
|
||||
PreintegratedImuMeasurements integrate(double T,
|
||||
const Bias& estimatedBias = Bias(),
|
||||
bool corrupted = false) const;
|
||||
|
||||
/// Predict state given a PIM
|
||||
NavState predict(const PreintegratedImuMeasurements& pim,
|
||||
const imuBias::ConstantBias& estimatedBias =
|
||||
imuBias::ConstantBias()) const;
|
||||
const Bias& estimatedBias = Bias()) const;
|
||||
|
||||
/// Return pose covariance by re-arranging pim.preintMeasCov() appropriately
|
||||
Matrix6 poseCovariance(const PreintegratedImuMeasurements& pim) const {
|
||||
Matrix9 cov = pim.preintMeasCov();
|
||||
Matrix6 poseCov;
|
||||
poseCov << cov.block<3, 3>(0, 0), cov.block<3, 3>(0, 3), //
|
||||
cov.block<3, 3>(3, 0), cov.block<3, 3>(3, 3);
|
||||
return poseCov;
|
||||
}
|
||||
/// Compute a Monte Carlo estimate of the predict covariance using N samples
|
||||
Matrix9 estimateCovariance(double T, size_t N = 1000,
|
||||
const Bias& estimatedBias = Bias()) const;
|
||||
|
||||
/// Compute a Monte Carlo estimate of the PIM pose covariance using N samples
|
||||
Matrix6 estimatePoseCovariance(double T, size_t N = 1000,
|
||||
const imuBias::ConstantBias& estimatedBias =
|
||||
imuBias::ConstantBias()) const;
|
||||
|
||||
private:
|
||||
// Convert covariance to diagonal noise model, if possible, otherwise throw
|
||||
static noiseModel::Diagonal::shared_ptr Diagonal(const Matrix& covariance) {
|
||||
bool smart = true;
|
||||
auto model = noiseModel::Gaussian::Covariance(covariance, smart);
|
||||
auto diagonal = boost::dynamic_pointer_cast<noiseModel::Diagonal>(model);
|
||||
if (!diagonal)
|
||||
throw std::invalid_argument("ScenarioRunner::Diagonal: not a diagonal");
|
||||
return diagonal;
|
||||
}
|
||||
|
||||
const Scenario* scenario_;
|
||||
const SharedParams p_;
|
||||
const double imuSampleTime_;
|
||||
const imuBias::ConstantBias bias_;
|
||||
|
||||
// Create two samplers for acceleration and omega noise
|
||||
mutable Sampler gyroSampler_, accSampler_;
|
||||
/// Estimate covariance of sampled noise for sanity-check
|
||||
Matrix6 estimateNoiseCovariance(size_t N = 1000) const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
||||
|
|
|
|||
|
|
@ -221,9 +221,9 @@ TEST( CombinedImuFactor, FirstOrderPreIntegratedMeasurements ) {
|
|||
|
||||
// Compare Jacobians
|
||||
EXPECT(assert_equal(expectedDelPdelBiasAcc, pim.delPdelBiasAcc()));
|
||||
EXPECT(assert_equal(expectedDelPdelBiasOmega, pim.delPdelBiasOmega()));
|
||||
EXPECT(assert_equal(expectedDelPdelBiasOmega, pim.delPdelBiasOmega(),1e-8));
|
||||
EXPECT(assert_equal(expectedDelVdelBiasAcc, pim.delVdelBiasAcc()));
|
||||
EXPECT(assert_equal(expectedDelVdelBiasOmega, pim.delVdelBiasOmega()));
|
||||
EXPECT(assert_equal(expectedDelVdelBiasOmega, pim.delVdelBiasOmega(),1e-8));
|
||||
EXPECT(assert_equal(expectedDelRdelBiasAcc, Matrix::Zero(3, 3)));
|
||||
EXPECT(assert_equal(expectedDelRdelBiasOmega, pim.delRdelBiasOmega(), 1e-3)); // 1e-3 needs to be added only when using quaternions for rotations
|
||||
}
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ static boost::shared_ptr<PreintegrationParams> defaultParams() {
|
|||
return p;
|
||||
}
|
||||
|
||||
// Auxiliary functions to test preintegrated Jacobians
|
||||
// Auxiliary functions to test pre-integrated Jacobians
|
||||
// delPdelBiasAcc_ delPdelBiasOmega_ delVdelBiasAcc_ delVdelBiasOmega_ delRdelBiasOmega_
|
||||
/* ************************************************************************* */
|
||||
PreintegratedImuMeasurements evaluatePreintegratedMeasurements(
|
||||
|
|
@ -140,24 +140,8 @@ TEST(ImuFactor, Accelerating) {
|
|||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 0.1));
|
||||
|
||||
// Check G1 and G2 derivatives of pim.update
|
||||
Matrix93 aG1, aG2;
|
||||
boost::function<NavState(const Vector3&, const Vector3&)> f = boost::bind(
|
||||
&PreintegrationBase::updatedDeltaXij, pim, _1, _2, T / 10, boost::none,
|
||||
boost::none, boost::none);
|
||||
const Vector3 measuredAcc = runner.actualSpecificForce(T);
|
||||
const Vector3 measuredOmega = runner.actualAngularVelocity(T);
|
||||
pim.updatedDeltaXij(measuredAcc, measuredOmega, T / 10, boost::none, aG1,
|
||||
aG2);
|
||||
EXPECT(
|
||||
assert_equal(numericalDerivative21(f, measuredAcc, measuredOmega, 1e-7),
|
||||
aG1, 1e-7));
|
||||
EXPECT(
|
||||
assert_equal(numericalDerivative22(f, measuredAcc, measuredOmega, 1e-7),
|
||||
aG2, 1e-7));
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -167,14 +151,14 @@ TEST(ImuFactor, PreintegratedMeasurements) {
|
|||
Vector3 measuredOmega(M_PI / 100.0, 0.0, 0.0);
|
||||
double deltaT = 0.5;
|
||||
|
||||
// Expected preintegrated values
|
||||
// Expected pre-integrated values
|
||||
Vector3 expectedDeltaP1;
|
||||
expectedDeltaP1 << 0.5 * 0.1 * 0.5 * 0.5, 0, 0;
|
||||
Vector3 expectedDeltaV1(0.05, 0.0, 0.0);
|
||||
Rot3 expectedDeltaR1 = Rot3::RzRyRx(0.5 * M_PI / 100.0, 0.0, 0.0);
|
||||
double expectedDeltaT1(0.5);
|
||||
|
||||
// Actual preintegrated values
|
||||
// Actual pre-integrated values
|
||||
PreintegratedImuMeasurements actual1(defaultParams());
|
||||
actual1.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
|
||||
|
||||
|
|
@ -183,6 +167,24 @@ TEST(ImuFactor, PreintegratedMeasurements) {
|
|||
EXPECT(assert_equal(expectedDeltaR1, Rot3(actual1.deltaRij())));
|
||||
DOUBLES_EQUAL(expectedDeltaT1, actual1.deltaTij(), 1e-9);
|
||||
|
||||
// Check derivatives of computeError
|
||||
imuBias::ConstantBias bias(Vector3(0.2, 0, 0), Vector3(0.1, 0, 0.3)); // Biases (acc, rot)
|
||||
NavState x1, x2 = actual1.predict(x1, bias);
|
||||
|
||||
{
|
||||
Matrix9 aH1, aH2;
|
||||
Matrix96 aH3;
|
||||
actual1.computeError(x1, x2, bias, aH1, aH2, aH3);
|
||||
boost::function<Vector9(const NavState&, const NavState&,
|
||||
const imuBias::ConstantBias&)> f =
|
||||
boost::bind(&PreintegrationBase::computeError, actual1, _1, _2, _3,
|
||||
boost::none, boost::none, boost::none);
|
||||
// NOTE(frank): tolerance of 1e-3 on H1 because approximate away from 0
|
||||
EXPECT(assert_equal(numericalDerivative31(f, x1, x2, bias), aH1, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative32(f, x1, x2, bias), aH2, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative33(f, x1, x2, bias), aH3, 1e-9));
|
||||
}
|
||||
|
||||
// Integrate again
|
||||
Vector3 expectedDeltaP2;
|
||||
expectedDeltaP2 << 0.025 + expectedDeltaP1(0) + 0.5 * 0.1 * 0.5 * 0.5, 0, 0;
|
||||
|
|
@ -191,7 +193,7 @@ TEST(ImuFactor, PreintegratedMeasurements) {
|
|||
Rot3 expectedDeltaR2 = Rot3::RzRyRx(2.0 * 0.5 * M_PI / 100.0, 0.0, 0.0);
|
||||
double expectedDeltaT2(1);
|
||||
|
||||
// Actual preintegrated values
|
||||
// Actual pre-integrated values
|
||||
PreintegratedImuMeasurements actual2 = actual1;
|
||||
actual2.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
|
||||
|
||||
|
|
@ -200,7 +202,6 @@ TEST(ImuFactor, PreintegratedMeasurements) {
|
|||
EXPECT(assert_equal(expectedDeltaR2, Rot3(actual2.deltaRij())));
|
||||
DOUBLES_EQUAL(expectedDeltaT2, actual2.deltaTij(), 1e-9);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Common linearization point and measurements for tests
|
||||
namespace common {
|
||||
|
|
@ -498,7 +499,7 @@ TEST(ImuFactor, FirstOrderPreIntegratedMeasurements) {
|
|||
deltaTs.push_back(0.01);
|
||||
}
|
||||
|
||||
// Actual preintegrated values
|
||||
// Actual pre-integrated values
|
||||
PreintegratedImuMeasurements preintegrated =
|
||||
evaluatePreintegratedMeasurements(kZeroBias, measuredAccs, measuredOmegas,
|
||||
deltaTs);
|
||||
|
|
@ -528,13 +529,13 @@ TEST(ImuFactor, FirstOrderPreIntegratedMeasurements) {
|
|||
// Compare Jacobians
|
||||
EXPECT(assert_equal(expectedDelPdelBiasAcc, preintegrated.delPdelBiasAcc()));
|
||||
EXPECT(
|
||||
assert_equal(expectedDelPdelBiasOmega, preintegrated.delPdelBiasOmega()));
|
||||
assert_equal(expectedDelPdelBiasOmega, preintegrated.delPdelBiasOmega(),1e-8));
|
||||
EXPECT(assert_equal(expectedDelVdelBiasAcc, preintegrated.delVdelBiasAcc()));
|
||||
EXPECT(
|
||||
assert_equal(expectedDelVdelBiasOmega, preintegrated.delVdelBiasOmega()));
|
||||
assert_equal(expectedDelVdelBiasOmega, preintegrated.delVdelBiasOmega(),1e-8));
|
||||
EXPECT(assert_equal(expectedDelRdelBiasAcc, Matrix::Zero(3, 3)));
|
||||
EXPECT(
|
||||
assert_equal(expectedDelRdelBiasOmega, preintegrated.delRdelBiasOmega()));
|
||||
assert_equal(expectedDelRdelBiasOmega, preintegrated.delRdelBiasOmega(),1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -581,34 +582,33 @@ TEST(ImuFactor, ErrorWithBiasesAndSensorBodyDisplacement) {
|
|||
PreintegratedImuMeasurements pim(p, biasHat);
|
||||
|
||||
// Check updatedDeltaXij derivatives
|
||||
Matrix3 D_correctedAcc_measuredOmega = Matrix3::Zero();
|
||||
Matrix3 D_correctedAcc_measuredOmega = Z_3x3;
|
||||
pim.correctMeasurementsByBiasAndSensorPose(measuredAcc, measuredOmega,
|
||||
boost::none, D_correctedAcc_measuredOmega, boost::none);
|
||||
Matrix3 expectedD = numericalDerivative11<Vector3, Vector3>(
|
||||
boost::bind(correctedAcc, pim, measuredAcc, _1), measuredOmega, 1e-6);
|
||||
EXPECT(assert_equal(expectedD, D_correctedAcc_measuredOmega, 1e-5));
|
||||
|
||||
Matrix93 G1, G2;
|
||||
double dt = 0.1;
|
||||
NavState preint = pim.updatedDeltaXij(measuredAcc, measuredOmega, dt,
|
||||
boost::none, G1, G2);
|
||||
// Matrix9 preintCov = G1*((accNoiseVar2/dt).asDiagonal())*G1.transpose() + G2*((omegaNoiseVar2/dt).asDiagonal())*G2.transpose();
|
||||
|
||||
Matrix93 expectedG1 = numericalDerivative21<NavState, Vector3, Vector3>(
|
||||
boost::bind(&PreintegratedImuMeasurements::updatedDeltaXij, pim, _1, _2,
|
||||
dt, boost::none, boost::none, boost::none), measuredAcc,
|
||||
measuredOmega, 1e-6);
|
||||
EXPECT(assert_equal(expectedG1, G1, 1e-5));
|
||||
|
||||
Matrix93 expectedG2 = numericalDerivative22<NavState, Vector3, Vector3>(
|
||||
boost::bind(&PreintegratedImuMeasurements::updatedDeltaXij, pim, _1, _2,
|
||||
dt, boost::none, boost::none, boost::none), measuredAcc,
|
||||
measuredOmega, 1e-6);
|
||||
EXPECT(assert_equal(expectedG2, G2, 1e-5));
|
||||
// TODO(frank): revive derivative tests
|
||||
// Matrix93 G1, G2;
|
||||
// PreintegrationBase::TangentVector preint =
|
||||
// pim.updatedDeltaXij(measuredAcc, measuredOmega, dt, boost::none, G1, G2);
|
||||
//
|
||||
// Matrix93 expectedG1 = numericalDerivative21<NavState, Vector3, Vector3>(
|
||||
// boost::bind(&PreintegratedImuMeasurements::updatedDeltaXij, pim, _1, _2,
|
||||
// dt, boost::none, boost::none, boost::none), measuredAcc,
|
||||
// measuredOmega, 1e-6);
|
||||
// EXPECT(assert_equal(expectedG1, G1, 1e-5));
|
||||
//
|
||||
// Matrix93 expectedG2 = numericalDerivative22<NavState, Vector3, Vector3>(
|
||||
// boost::bind(&PreintegratedImuMeasurements::updatedDeltaXij, pim, _1, _2,
|
||||
// dt, boost::none, boost::none, boost::none), measuredAcc,
|
||||
// measuredOmega, 1e-6);
|
||||
// EXPECT(assert_equal(expectedG2, G2, 1e-5));
|
||||
|
||||
imuBias::ConstantBias bias(Vector3(0.2, 0, 0), Vector3(0, 0, 0.3)); // Biases (acc, rot)
|
||||
// EXPECT(MonteCarlo(pim, NavState(x1, initial_velocity), bias, dt, body_P_sensor,
|
||||
// measuredAcc, measuredOmega, accNoiseVar2, omegaNoiseVar2, 100000));
|
||||
|
||||
// integrate at least twice to get position information
|
||||
// otherwise factor cov noise from preint_cov is not positive definite
|
||||
|
|
@ -629,8 +629,6 @@ TEST(ImuFactor, ErrorWithBiasesAndSensorBodyDisplacement) {
|
|||
values.insert(V(2), v2);
|
||||
values.insert(B(1), bias);
|
||||
|
||||
// factor.get_noiseModel()->print("noise: "); // Make sure the noise is valid
|
||||
|
||||
// Make sure linearization is correct
|
||||
double diffDelta = 1e-8;
|
||||
EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, diffDelta, 1e-3);
|
||||
|
|
|
|||
|
|
@ -0,0 +1,99 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testPreintegrationBase.cpp
|
||||
* @brief Unit test for the InertialNavFactor
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/navigation/PreintegrationBase.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <boost/bind.hpp>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const double kDt = 0.1;
|
||||
|
||||
static const double kGyroSigma = 0.02;
|
||||
static const double kAccelSigma = 0.1;
|
||||
|
||||
// Create default parameters with Z-down and above noise parameters
|
||||
static boost::shared_ptr<PreintegrationBase::Params> defaultParams() {
|
||||
auto p = PreintegrationParams::MakeSharedD(10);
|
||||
p->gyroscopeCovariance = kGyroSigma * kGyroSigma * I_3x3;
|
||||
p->accelerometerCovariance = kAccelSigma * kAccelSigma * I_3x3;
|
||||
p->integrationCovariance = 0.0000001 * I_3x3;
|
||||
return p;
|
||||
}
|
||||
|
||||
Vector9 f(const Vector9& zeta, const Vector3& a, const Vector3& w) {
|
||||
PreintegrationBase::TangentVector zeta_plus =
|
||||
PreintegrationBase::UpdateEstimate(a, w, kDt, zeta);
|
||||
return zeta_plus.vector();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PreintegrationBase, UpdateEstimate1) {
|
||||
PreintegrationBase pim(defaultParams());
|
||||
const Vector3 acc(0.1, 0.2, 10), omega(0.1, 0.2, 0.3);
|
||||
Vector9 zeta;
|
||||
zeta.setZero();
|
||||
Matrix9 aH1;
|
||||
Matrix93 aH2, aH3;
|
||||
pim.UpdateEstimate(acc, omega, kDt, zeta, aH1, aH2, aH3);
|
||||
EXPECT(assert_equal(numericalDerivative31(f, zeta, acc, omega), aH1, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative32(f, zeta, acc, omega), aH2, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative33(f, zeta, acc, omega), aH3, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PreintegrationBase, UpdateEstimate2) {
|
||||
PreintegrationBase pim(defaultParams());
|
||||
const Vector3 acc(0.1, 0.2, 10), omega(0.1, 0.2, 0.3);
|
||||
Vector9 zeta;
|
||||
zeta << 0.01, 0.02, 0.03, 100, 200, 300, 10, 5, 3;
|
||||
Matrix9 aH1;
|
||||
Matrix93 aH2, aH3;
|
||||
pim.UpdateEstimate(acc, omega, kDt, zeta, aH1, aH2, aH3);
|
||||
// NOTE(frank): tolerance of 1e-3 on H1 because approximate away from 0
|
||||
EXPECT(assert_equal(numericalDerivative31(f, zeta, acc, omega), aH1, 1e-3));
|
||||
EXPECT(assert_equal(numericalDerivative32(f, zeta, acc, omega), aH2, 1e-7));
|
||||
EXPECT(assert_equal(numericalDerivative33(f, zeta, acc, omega), aH3, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(PreintegrationBase, computeError) {
|
||||
PreintegrationBase pim(defaultParams());
|
||||
NavState x1, x2;
|
||||
imuBias::ConstantBias bias;
|
||||
Matrix9 aH1, aH2;
|
||||
Matrix96 aH3;
|
||||
pim.computeError(x1, x2, bias, aH1, aH2, aH3);
|
||||
boost::function<Vector9(const NavState&, const NavState&,
|
||||
const imuBias::ConstantBias&)> f =
|
||||
boost::bind(&PreintegrationBase::computeError, pim, _1, _2, _3,
|
||||
boost::none, boost::none, boost::none);
|
||||
// NOTE(frank): tolerance of 1e-3 on H1 because approximate away from 0
|
||||
EXPECT(assert_equal(numericalDerivative31(f, x1, x2, bias), aH1, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative32(f, x1, x2, bias), aH2, 1e-9));
|
||||
EXPECT(assert_equal(numericalDerivative33(f, x1, x2, bias), aH3, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -25,7 +25,24 @@
|
|||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const double degree = M_PI / 180.0;
|
||||
static const double kDegree = M_PI / 180.0;
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Scenario, Spin) {
|
||||
// angular velocity 6 kDegree/sec
|
||||
const double w = 6 * kDegree;
|
||||
const Vector3 W(0, 0, w), V(0, 0, 0);
|
||||
const ConstantTwistScenario scenario(W, V);
|
||||
|
||||
const double T = 10;
|
||||
EXPECT(assert_equal(W, scenario.omega_b(T), 1e-9));
|
||||
EXPECT(assert_equal(V, scenario.velocity_b(T), 1e-9));
|
||||
EXPECT(assert_equal(W.cross(V), scenario.acceleration_b(T), 1e-9));
|
||||
|
||||
const Pose3 T10 = scenario.pose(T);
|
||||
EXPECT(assert_equal(Vector3(0, 0, 60 * kDegree), T10.rotation().xyz(), 1e-9));
|
||||
EXPECT(assert_equal(Point3(0, 0, 0), T10.translation(), 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Scenario, Forward) {
|
||||
|
|
@ -45,8 +62,8 @@ TEST(Scenario, Forward) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
TEST(Scenario, Circle) {
|
||||
// Forward velocity 2m/s, angular velocity 6 degree/sec around Z
|
||||
const double v = 2, w = 6 * degree;
|
||||
// Forward velocity 2m/s, angular velocity 6 degree/sec around Z
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
const Vector3 W(0, 0, w), V(v, 0, 0);
|
||||
const ConstantTwistScenario scenario(W, V);
|
||||
|
||||
|
|
@ -58,15 +75,15 @@ TEST(Scenario, Circle) {
|
|||
// R = v/w, so test if circle is of right size
|
||||
const double R = v / w;
|
||||
const Pose3 T15 = scenario.pose(T);
|
||||
EXPECT(assert_equal(Vector3(0, 0, 90 * degree), T15.rotation().xyz(), 1e-9));
|
||||
EXPECT(assert_equal(Vector3(0, 0, 90 * kDegree), T15.rotation().xyz(), 1e-9));
|
||||
EXPECT(assert_equal(Point3(R, R, 0), T15.translation(), 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Scenario, Loop) {
|
||||
// Forward velocity 2m/s
|
||||
// Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
const double v = 2, w = 6 * degree;
|
||||
// Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
const Vector3 W(0, -w, 0), V(v, 0, 0);
|
||||
const ConstantTwistScenario scenario(W, V);
|
||||
|
||||
|
|
@ -100,10 +117,10 @@ TEST(Scenario, Accelerating) {
|
|||
EXPECT(assert_equal(A, scenario.acceleration_n(T), 1e-9));
|
||||
|
||||
{
|
||||
// Check acceleration in nav
|
||||
Matrix expected = numericalDerivative11<Vector3, double>(
|
||||
boost::bind(&Scenario::velocity_n, scenario, _1), T);
|
||||
EXPECT(assert_equal(Vector3(expected), scenario.acceleration_n(T), 1e-9));
|
||||
// Check acceleration in nav
|
||||
Matrix expected = numericalDerivative11<Vector3, double>(
|
||||
boost::bind(&Scenario::velocity_n, scenario, _1), T);
|
||||
EXPECT(assert_equal(Vector3(expected), scenario.acceleration_n(T), 1e-9));
|
||||
}
|
||||
|
||||
const Pose3 T3 = scenario.pose(3);
|
||||
|
|
|
|||
|
|
@ -1,174 +0,0 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testScenarioRunner.cpp
|
||||
* @brief test ImuFactor with ScenarioRunner class
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/navigation/ScenarioRunner.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <cmath>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const double kDegree = M_PI / 180.0;
|
||||
static const double kDeltaT = 1e-2;
|
||||
static const double kGyroSigma = 0.02;
|
||||
static const double kAccelSigma = 0.1;
|
||||
|
||||
static const Vector3 kAccBias(0.2, 0, 0), kRotBias(0.1, 0, 0.3);
|
||||
static const imuBias::ConstantBias kNonZeroBias(kAccBias, kRotBias);
|
||||
|
||||
// Create default parameters with Z-down and above noise parameters
|
||||
static boost::shared_ptr<PreintegrationParams> defaultParams() {
|
||||
auto p = PreintegrationParams::MakeSharedD(10);
|
||||
p->gyroscopeCovariance = kGyroSigma * kGyroSigma * I_3x3;
|
||||
p->accelerometerCovariance = kAccelSigma * kAccelSigma * I_3x3;
|
||||
p->integrationCovariance = 0.0000001 * I_3x3;
|
||||
return p;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace forward {
|
||||
const double v = 2; // m/s
|
||||
ConstantTwistScenario scenario(Vector3::Zero(), Vector3(v, 0, 0));
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Forward) {
|
||||
using namespace forward;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDeltaT);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, ForwardWithBias) {
|
||||
using namespace forward;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDeltaT);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T, 1000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Circle) {
|
||||
// Forward velocity 2m/s, angular velocity 6 kDegree/sec
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
ConstantTwistScenario scenario(Vector3(0, 0, w), Vector3(v, 0, 0));
|
||||
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDeltaT);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 0.1));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Loop) {
|
||||
// Forward velocity 2m/s
|
||||
// Pitch up with angular velocity 6 kDegree/sec (negative in FLU)
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
ConstantTwistScenario scenario(Vector3(0, -w, 0), Vector3(v, 0, 0));
|
||||
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDeltaT);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 0.1));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace initial {
|
||||
// Set up body pointing towards y axis, and start at 10,20,0 with velocity
|
||||
// going in X. The body itself has Z axis pointing down
|
||||
const Rot3 nRb(Point3(0, 1, 0), Point3(1, 0, 0), Point3(0, 0, -1));
|
||||
const Point3 P0(10, 20, 0);
|
||||
const Vector3 V0(50, 0, 0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace accelerating {
|
||||
using namespace initial;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A);
|
||||
|
||||
const double T = 3; // seconds
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Accelerating) {
|
||||
using namespace accelerating;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// TODO(frank):Fails !
|
||||
// TEST(ScenarioRunner, AcceleratingWithBias) {
|
||||
// using namespace accelerating;
|
||||
// ScenarioRunner runner(&scenario, T / 10, kGyroSigma, kAccelSigma,
|
||||
// kNonZeroBias);
|
||||
//
|
||||
// PreintegratedImuMeasurements pim = runner.integrate(T,
|
||||
// kNonZeroBias);
|
||||
// Matrix6 estimatedCov = runner.estimatePoseCovariance(T, 10000,
|
||||
// kNonZeroBias);
|
||||
// EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 0.1));
|
||||
//}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingAndRotating) {
|
||||
using namespace initial;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0), W(0, 0.1, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A, W);
|
||||
|
||||
const double T = 3; // seconds
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
PreintegratedImuMeasurements pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix6 estimatedCov = runner.estimatePoseCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, runner.poseCovariance(pim), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
auto result = TestRegistry::runAllTests(tr);
|
||||
tictoc_print_();
|
||||
return result;
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -0,0 +1,385 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testScenarioRunner.cpp
|
||||
* @brief test ImuFactor with ScenarioRunner class
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam/navigation/ScenarioRunner.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <cmath>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
static const double kDegree = M_PI / 180.0;
|
||||
static const double kDt = 1e-2;
|
||||
|
||||
// realistic white noise strengths are 0.5 deg/sqrt(hr) and 0.1 (m/s)/sqrt(hr)
|
||||
static const double kGyroSigma = 0.5 * kDegree / 60;
|
||||
static const double kAccelSigma = 0.1 / 60.0;
|
||||
|
||||
static const Vector3 kAccBias(0.2, 0, 0), kRotBias(0.1, 0, 0.3);
|
||||
static const imuBias::ConstantBias kNonZeroBias(kAccBias, kRotBias);
|
||||
|
||||
// Create default parameters with Z-up and above noise parameters
|
||||
static boost::shared_ptr<PreintegratedImuMeasurements::Params> defaultParams() {
|
||||
auto p = PreintegrationParams::MakeSharedU(10);
|
||||
p->gyroscopeCovariance = kGyroSigma * kGyroSigma * I_3x3;
|
||||
p->accelerometerCovariance = kAccelSigma * kAccelSigma * I_3x3;
|
||||
p->integrationCovariance = 0.0000001 * I_3x3;
|
||||
return p;
|
||||
}
|
||||
|
||||
#define EXPECT_NEAR(a, b, c) EXPECT(assert_equal(Vector(a), Vector(b), c));
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Spin) {
|
||||
// angular velocity 6 degree/sec
|
||||
const double w = 6 * kDegree;
|
||||
const Vector3 W(0, 0, w), V(0, 0, 0);
|
||||
const ConstantTwistScenario scenario(W, V);
|
||||
|
||||
auto p = defaultParams();
|
||||
ScenarioRunner runner(&scenario, p, kDt);
|
||||
const double T = 2 * kDt; // seconds
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
#if 0
|
||||
// Check sampled noise is kosher
|
||||
Matrix6 expected;
|
||||
expected << p->accelerometerCovariance / kDt, Z_3x3, //
|
||||
Z_3x3, p->gyroscopeCovariance / kDt;
|
||||
Matrix6 actual = runner.estimateNoiseCovariance(10000);
|
||||
EXPECT(assert_equal(expected, actual, 1e-2));
|
||||
#endif
|
||||
|
||||
// Check calculated covariance against Monte Carlo estimate
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace forward {
|
||||
const double v = 2; // m/s
|
||||
ConstantTwistScenario scenario(Vector3::Zero(), Vector3(v, 0, 0));
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Forward) {
|
||||
using namespace forward;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDt);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, ForwardWithBias) {
|
||||
using namespace forward;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDt);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
auto pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T, 1000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Circle) {
|
||||
// Forward velocity 2m/s, angular velocity 6 degree/sec
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
ConstantTwistScenario scenario(Vector3(0, 0, w), Vector3(v, 0, 0));
|
||||
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDt);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 0.1));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Loop) {
|
||||
// Forward velocity 2m/s
|
||||
// Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
const double v = 2, w = 6 * kDegree;
|
||||
ConstantTwistScenario scenario(Vector3(0, -w, 0), Vector3(v, 0, 0));
|
||||
|
||||
ScenarioRunner runner(&scenario, defaultParams(), kDt);
|
||||
const double T = 0.1; // seconds
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 0.1));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace initial {
|
||||
const Rot3 nRb;
|
||||
const Point3 P0;
|
||||
const Vector3 V0(0, 0, 0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace accelerating {
|
||||
using namespace initial;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A);
|
||||
|
||||
const double T = 3; // seconds
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Accelerating) {
|
||||
using namespace accelerating;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingWithBias) {
|
||||
using namespace accelerating;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias);
|
||||
|
||||
auto pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingAndRotating) {
|
||||
using namespace initial;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0), W(0, 0.1, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A, W);
|
||||
|
||||
const double T = 10; // seconds
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace initial2 {
|
||||
// No rotation, but non-zero position and velocities
|
||||
const Rot3 nRb;
|
||||
const Point3 P0(10, 20, 0);
|
||||
const Vector3 V0(50, 0, 0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace accelerating2 {
|
||||
using namespace initial2;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A);
|
||||
|
||||
const double T = 3; // seconds
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Accelerating2) {
|
||||
using namespace accelerating2;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingWithBias2) {
|
||||
using namespace accelerating2;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias);
|
||||
|
||||
auto pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingAndRotating2) {
|
||||
using namespace initial2;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0), W(0, 0.1, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A, W);
|
||||
|
||||
const double T = 10; // seconds
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace initial3 {
|
||||
// Rotation only
|
||||
// Set up body pointing towards y axis. The body itself has Z axis pointing down
|
||||
const Rot3 nRb(Point3(0, 1, 0), Point3(1, 0, 0), Point3(0, 0, -1));
|
||||
const Point3 P0;
|
||||
const Vector3 V0(0, 0, 0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace accelerating3 {
|
||||
using namespace initial3;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A);
|
||||
|
||||
const double T = 3; // seconds
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Accelerating3) {
|
||||
using namespace accelerating3;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingWithBias3) {
|
||||
using namespace accelerating3;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias);
|
||||
|
||||
auto pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingAndRotating3) {
|
||||
using namespace initial3;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0), W(0, 0.1, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A, W);
|
||||
|
||||
const double T = 10; // seconds
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace initial4 {
|
||||
// Both rotation and translation
|
||||
// Set up body pointing towards y axis, and start at 10,20,0 with velocity
|
||||
// going in X. The body itself has Z axis pointing down
|
||||
const Rot3 nRb(Point3(0, 1, 0), Point3(1, 0, 0), Point3(0, 0, -1));
|
||||
const Point3 P0(10, 20, 0);
|
||||
const Vector3 V0(50, 0, 0);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
namespace accelerating4 {
|
||||
using namespace initial4;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A);
|
||||
|
||||
const double T = 3; // seconds
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, Accelerating4) {
|
||||
using namespace accelerating4;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingWithBias4) {
|
||||
using namespace accelerating4;
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10, kNonZeroBias);
|
||||
|
||||
auto pim = runner.integrate(T, kNonZeroBias);
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T, 10000, kNonZeroBias);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(ScenarioRunner, AcceleratingAndRotating4) {
|
||||
using namespace initial4;
|
||||
const double a = 0.2; // m/s^2
|
||||
const Vector3 A(0, a, 0), W(0, 0.1, 0);
|
||||
const AcceleratingScenario scenario(nRb, P0, V0, A, W);
|
||||
|
||||
const double T = 10; // seconds
|
||||
ScenarioRunner runner(&scenario, defaultParams(), T / 10);
|
||||
|
||||
auto pim = runner.integrate(T);
|
||||
EXPECT(assert_equal(scenario.pose(T), runner.predict(pim).pose(), 1e-9));
|
||||
|
||||
Matrix9 estimatedCov = runner.estimateCovariance(T);
|
||||
EXPECT_NEAR(estimatedCov.diagonal(), pim.preintMeasCov().diagonal(), 0.1);
|
||||
EXPECT(assert_equal(estimatedCov, pim.preintMeasCov(), 0.1));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
auto result = TestRegistry::runAllTests(tr);
|
||||
tictoc_print_();
|
||||
return result;
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -0,0 +1 @@
|
|||
build/
|
||||
|
|
@ -0,0 +1,96 @@
|
|||
# Guard against GTSAM_PYTHON_VERSION being accidentally set to an empty string in ccmake
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "")
|
||||
set(GTSAM_PYTHON_VERSION "Default" CACHE STRING "Target python version for GTSAM python module. Use 'Default' to choose the default version" FORCE)
|
||||
endif()
|
||||
|
||||
# The code below clears the PythonLibs cache if GTSAM_PYTHON_VERSION is changed
# Inspired by the solution found here: http://blog.bethcodes.com/cmake-tips-tricks-drop-down-list
|
||||
if(NOT DEFINED GTSAM_LAST_PYTHON_VERSION)
|
||||
set(GTSAM_LAST_PYTHON_VERSION ${GTSAM_PYTHON_VERSION} CACHE STRING "Python version used in the last build")
|
||||
mark_as_advanced(FORCE GTSAM_LAST_PYTHON_VERSION)
|
||||
endif()
|
||||
if(NOT (${GTSAM_PYTHON_VERSION} MATCHES ${GTSAM_LAST_PYTHON_VERSION}))
|
||||
unset(PYTHON_INCLUDE_DIR CACHE)
|
||||
unset(PYTHON_INCLUDE_DIR2 CACHE)
|
||||
unset(PYTHON_LIBRARY CACHE)
|
||||
unset(PYTHON_LIBRARY_DEBUG CACHE)
|
||||
set(GTSAM_LAST_PYTHON_VERSION ${GTSAM_PYTHON_VERSION} CACHE STRING "Updating python version used in the last build" FORCE)
|
||||
endif()
|
||||
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
# Search the default version.
|
||||
find_package(PythonInterp)
|
||||
find_package(PythonLibs)
|
||||
else()
|
||||
find_package(PythonInterp ${GTSAM_PYTHON_VERSION})
|
||||
find_package(PythonLibs ${GTSAM_PYTHON_VERSION})
|
||||
endif()
|
||||
|
||||
# Find NumPy C-API -- this is part of the numpy package
|
||||
find_package(NumPy)
|
||||
|
||||
# Compose strings used to specify the Boost Python version. They will be empty if we want to use the default
|
||||
if(NOT GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
string(REPLACE "." "" BOOST_PYTHON_VERSION_SUFFIX ${GTSAM_PYTHON_VERSION}) # Remove '.' from version
|
||||
string(SUBSTRING ${BOOST_PYTHON_VERSION_SUFFIX} 0 2 BOOST_PYTHON_VERSION_SUFFIX) # Trim version number to 2 digits
|
||||
set(BOOST_PYTHON_VERSION_SUFFIX "-py${BOOST_PYTHON_VERSION_SUFFIX}") # Append '-py' prefix
|
||||
string(TOUPPER ${BOOST_PYTHON_VERSION_SUFFIX} BOOST_PYTHON_VERSION_SUFFIX_UPPERCASE) # Get uppercase string
|
||||
else()
|
||||
set(BOOST_PYTHON_VERSION_SUFFIX "")
|
||||
set(BOOST_PYTHON_VERSION_SUFFIX_UPPERCASE "")
|
||||
endif()
|
||||
|
||||
# Find Boost Python
|
||||
find_package(Boost COMPONENTS python${BOOST_PYTHON_VERSION_SUFFIX})
|
||||
|
||||
if(Boost_PYTHON${BOOST_PYTHON_VERSION_SUFFIX_UPPERCASE}_FOUND AND PYTHONLIBS_FOUND AND NUMPY_FOUND)
|
||||
# Build library
|
||||
include_directories(${NUMPY_INCLUDE_DIRS})
|
||||
include_directories(${PYTHON_INCLUDE_DIRS})
|
||||
include_directories(${Boost_INCLUDE_DIRS})
|
||||
include_directories(${CMAKE_SOURCE_DIR}/gtsam/3rdparty/numpy_eigen/include/)
|
||||
|
||||
# Build the python module library
|
||||
add_subdirectory(handwritten)
|
||||
|
||||
# Create and invoke setup.py, see https://bloerg.net/2012/11/10/cmake-and-distutils.html
|
||||
set(SETUP_PY_IN "${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in")
|
||||
set(SETUP_PY "${CMAKE_CURRENT_BINARY_DIR}/setup.py")
|
||||
|
||||
# Hacky way to figure out install folder - valid for Linux & Mac
|
||||
# default pattern: prefix/lib/pythonX.Y/site-packages from https://docs.python.org/2/install/
|
||||
SET(PY_INSTALL_FOLDER "${CMAKE_INSTALL_PREFIX}/lib/python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}/site-packages")
|
||||
|
||||
configure_file(${SETUP_PY_IN} ${SETUP_PY})
|
||||
|
||||
# TODO(frank): possibly support a different prefix a la matlab wrapper
|
||||
install(CODE "execute_process(WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMAND ${PYTHON_EXECUTABLE} setup.py -v install --prefix ${CMAKE_INSTALL_PREFIX})")
|
||||
else()
|
||||
# Disable python module if we didn't find required libraries
|
||||
# message will print at end of main CMakeLists.txt
|
||||
SET(GTSAM_PYTHON_WARNINGS "Python dependencies not found - Python module will not be built. Set GTSAM_BUILD_PYTHON to 'Off' to disable this warning. Details:")
|
||||
|
||||
if(NOT PYTHONLIBS_FOUND)
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
SET(GTSAM_PYTHON_WARNINGS "${GTSAM_PYTHON_WARNINGS}\n -- Default PythonLibs not found")
|
||||
else()
|
||||
SET(GTSAM_PYTHON_WARNINGS "${GTSAM_PYTHON_WARNINGS}\n -- PythonLibs version ${GTSAM_PYTHON_VERSION} not found")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(NOT NUMPY_FOUND)
|
||||
SET(GTSAM_PYTHON_WARNINGS "${GTSAM_PYTHON_WARNINGS}\n -- Numpy not found")
|
||||
endif()
|
||||
|
||||
if(NOT Boost_PYTHON${BOOST_PYTHON_VERSION_SUFFIX_UPPERCASE}_FOUND)
|
||||
if(GTSAM_PYTHON_VERSION STREQUAL "Default")
|
||||
SET(GTSAM_PYTHON_WARNINGS "${GTSAM_PYTHON_WARNINGS}\n -- Default Boost python not found")
|
||||
else()
|
||||
SET(GTSAM_PYTHON_WARNINGS "${GTSAM_PYTHON_WARNINGS}\n -- Boost Python for python ${GTSAM_PYTHON_VERSION} not found")
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# make available at top-level
|
||||
SET(GTSAM_PYTHON_WARNINGS ${GTSAM_PYTHON_WARNINGS} PARENT_SCOPE)
|
||||
|
||||
endif()
|
||||
|
|
@ -0,0 +1,18 @@
|
|||
Python Wrapper and Packaging
============================

This directory contains the basic setup script and directory structure for the gtsam python module.
When GTSAM_BUILD_PYTHON is enabled, the following steps run during the build of gtsam:

* The handwritten module source files are compiled and linked with Boost Python, generating a shared
  library which can then be imported by python
* A setup.py script is configured from setup.py.in
* The gtsam packages 'gtsam', 'gtsam_utils', 'gtsam_examples', and 'gtsam_tests' are installed into
  the site-packages folder within the (possibly non-default) installation prefix folder. If
  installing to a non-standard prefix, make sure that _prefix_/lib/pythonX.Y/site-packages is
  present in your $PYTHONPATH

The target version of Python used to build the module can be set by defining GTSAM_PYTHON_VERSION as
'X.Y' (for example 2.7 or 3.4), or as 'Default' to use the default python installed on your system.
Note that if you specify a target version of python, you should also have the corresponding Boost
Python version installed (for example libboost_python-py27.so or libboost_python-py34.so on Linux).
If you're using the default version, your default Boost Python library (for example libboost_python.so
on Linux) should correspond to the default python version on your system.
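
A minimal smoke test of the installed module (a sketch only, assuming the install described above
succeeded and the site-packages folder is on your $PYTHONPATH; the pose values are arbitrary):

    #!/usr/bin/env python
    # Smoke test: checks that the compiled extension loads and a basic type can be constructed.
    from __future__ import print_function
    import gtsam

    pose = gtsam.Pose2(1.0, 2.0, 0.3)  # x, y, theta
    print(pose)  # succeeds only if the wrapper library was built and installed correctly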
|
|
@ -0,0 +1 @@
|
|||
from ._libgtsam_python import *
|
||||
|
|
@ -0,0 +1,105 @@
|
|||
"""
|
||||
A script validating the ImuFactor inference.
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
import math
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam_utils import plotPose3
|
||||
from PreintegrationExample import PreintegrationExample, POSES_FIG
|
||||
|
||||
# shorthand symbols:
|
||||
BIAS_KEY = int(gtsam.Symbol('b', 0))
|
||||
V = lambda j: int(gtsam.Symbol('v', j))
|
||||
X = lambda i: int(gtsam.Symbol('x', i))
|
||||
|
||||
class ImuFactorExample(PreintegrationExample):
|
||||
|
||||
def __init__(self):
|
||||
self.velocity = np.array([2, 0, 0])
|
||||
self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
|
||||
self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
|
||||
|
||||
forward_twist = (np.zeros(3), self.velocity)
|
||||
loop_twist = (np.array([0, -math.radians(30), 0]), self.velocity)
|
||||
|
||||
accBias = np.array([-0.3, 0.1, 0.2])
|
||||
gyroBias = np.array([0.1, 0.3, -0.1])
|
||||
bias = gtsam.ConstantBias(accBias, gyroBias)
|
||||
|
||||
super(ImuFactorExample, self).__init__(loop_twist, bias)
|
||||
|
||||
def addPrior(self, i, graph):
|
||||
state = self.scenario.navState(i)
|
||||
graph.push_back(gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
|
||||
graph.push_back(gtsam.PriorFactorVector3(V(i), state.velocity(), self.velNoise))
|
||||
|
||||
def run(self):
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
i = 0 # state index
|
||||
|
||||
# initialize data structure for pre-integrated IMU measurements
|
||||
pim = gtsam.PreintegratedImuMeasurements(self.params, self.actualBias)
|
||||
|
||||
# simulate the loop
|
||||
T = 12
|
||||
actual_state_i = self.scenario.navState(0)
|
||||
for k, t in enumerate(np.arange(0, T, self.dt)):
|
||||
# get measurements and add them to PIM
|
||||
measuredOmega = self.runner.measuredAngularVelocity(t)
|
||||
measuredAcc = self.runner.measuredSpecificForce(t)
|
||||
pim.integrateMeasurement(measuredAcc, measuredOmega, self.dt)
|
||||
|
||||
# Plot IMU many times
|
||||
if k % 10 == 0:
|
||||
self.plotImu(t, measuredOmega, measuredAcc)
|
||||
|
||||
# Plot every second
|
||||
if k % 100 == 0:
|
||||
self.plotGroundTruthPose(t)
|
||||
|
||||
# create IMU factor every second
|
||||
if (k + 1) % 100 == 0:
|
||||
factor = gtsam.ImuFactor(X(i), V(i), X(i + 1), V(i + 1), BIAS_KEY, pim)
|
||||
graph.push_back(factor)
|
||||
pim.resetIntegration()
|
||||
actual_state_i = self.scenario.navState(t + self.dt)
|
||||
i += 1
|
||||
|
||||
# add priors on beginning and end
|
||||
num_poses = i + 1
|
||||
self.addPrior(0, graph)
|
||||
self.addPrior(num_poses - 1, graph)
|
||||
|
||||
initial = gtsam.Values()
|
||||
initial.insert(BIAS_KEY, self.actualBias)
|
||||
for i in range(num_poses):
|
||||
state_i = self.scenario.navState(float(i))
|
||||
initial.insert(X(i), state_i.pose())
|
||||
initial.insert(V(i), state_i.velocity())
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
params.setVerbosityLM("SUMMARY")
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
|
||||
# Plot resulting poses
|
||||
i = 0
|
||||
while result.exists(X(i)):
|
||||
pose_i = result.atPose3(X(i))
|
||||
plotPose3(POSES_FIG, pose_i, 0.1)
|
||||
i += 1
|
||||
print(result.atConstantBias(BIAS_KEY))
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
if __name__ == '__main__':
|
||||
ImuFactorExample().run()
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
#!/usr/bin/env python
|
||||
from __future__ import print_function
|
||||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
# Create an empty nonlinear factor graph
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
|
||||
# Add a prior on the first pose, setting it to the origin
|
||||
# A prior factor consists of a mean and a noise model (covariance matrix)
|
||||
priorMean = gtsam.Pose2(0.0, 0.0, 0.0) # prior at origin
|
||||
priorNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.1]))
|
||||
graph.add(gtsam.PriorFactorPose2(1, priorMean, priorNoise))
|
||||
|
||||
# Add odometry factors
|
||||
odometry = gtsam.Pose2(2.0, 0.0, 0.0)
|
||||
# For simplicity, we will use the same noise model for each odometry factor
|
||||
odometryNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 0.1]))
|
||||
# Create odometry (Between) factors between consecutive poses
|
||||
graph.add(gtsam.BetweenFactorPose2(1, 2, odometry, odometryNoise))
|
||||
graph.add(gtsam.BetweenFactorPose2(2, 3, odometry, odometryNoise))
|
||||
graph.print("\nFactor Graph:\n")
|
||||
|
||||
# Create the data structure to hold the initial estimate of the solution
|
||||
# For illustrative purposes, these have been deliberately set to incorrect values
|
||||
initial = gtsam.Values()
|
||||
initial.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
|
||||
initial.insert(2, gtsam.Pose2(2.3, 0.1, -0.2))
|
||||
initial.insert(3, gtsam.Pose2(4.1, 0.1, 0.1))
|
||||
initial.print("\nInitial Estimate:\n")
|
||||
|
||||
# optimize using Levenberg-Marquardt optimization
|
||||
params = gtsam.LevenbergMarquardtParams()
|
||||
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
|
||||
result = optimizer.optimize()
|
||||
result.print("\nFinal Result:\n")
|
||||
|
|
@ -0,0 +1,129 @@
|
|||
"""
|
||||
A script validating the Preintegration of IMU measurements
|
||||
"""
|
||||
|
||||
import math
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
|
||||
import gtsam
|
||||
from gtsam_utils import plotPose3
|
||||
|
||||
IMU_FIG = 1
|
||||
POSES_FIG = 2
|
||||
|
||||
class PreintegrationExample(object):
|
||||
|
||||
@staticmethod
|
||||
def defaultParams(g):
|
||||
"""Create default parameters with Z *up* and realistic noise parameters"""
|
||||
params = gtsam.PreintegrationParams.MakeSharedU(g)
|
||||
kGyroSigma = math.radians(0.5) / 60 # 0.5 degree ARW
|
||||
kAccelSigma = 0.1 / 60 # 10 cm VRW
|
||||
params.gyroscopeCovariance = kGyroSigma ** 2 * np.identity(3, np.float)
|
||||
params.accelerometerCovariance = kAccelSigma ** 2 * np.identity(3, np.float)
|
||||
params.integrationCovariance = 0.0000001 ** 2 * np.identity(3, np.float)
|
||||
return params
|
||||
|
||||
def __init__(self, twist=None, bias=None):
|
||||
"""Initialize with given twist, a pair(angularVelocityVector, velocityVector)."""
|
||||
|
||||
# setup interactive plotting
|
||||
plt.ion()
|
||||
|
||||
# Setup loop as default scenario
|
||||
if twist is not None:
|
||||
(W, V) = twist
|
||||
else:
|
||||
# default = loop with forward velocity 2m/s, while pitching up
|
||||
# with angular velocity 30 degree/sec (negative in FLU)
|
||||
W = np.array([0, -math.radians(30), 0])
|
||||
V = np.array([2, 0, 0])
|
||||
|
||||
self.scenario = gtsam.ConstantTwistScenario(W, V)
|
||||
self.dt = 1e-2
|
||||
|
||||
self.maxDim = 5
|
||||
self.labels = list('xyz')
|
||||
self.colors = list('rgb')
|
||||
|
||||
# Create runner
|
||||
self.g = 10 # simple gravity constant
|
||||
self.params = self.defaultParams(self.g)
|
||||
ptr = gtsam.ScenarioPointer(self.scenario)
|
||||
|
||||
if bias is not None:
|
||||
self.actualBias = bias
|
||||
else:
|
||||
accBias = np.array([0, 0.1, 0])
|
||||
gyroBias = np.array([0, 0, 0])
|
||||
self.actualBias = gtsam.ConstantBias(accBias, gyroBias)
|
||||
|
||||
self.runner = gtsam.ScenarioRunner(ptr, self.params, self.dt, self.actualBias)
|
||||
|
||||
def plotImu(self, t, measuredOmega, measuredAcc):
|
||||
plt.figure(IMU_FIG)
|
||||
|
||||
# plot angular velocity
|
||||
omega_b = self.scenario.omega_b(t)
|
||||
for i, (label, color) in enumerate(zip(self.labels, self.colors)):
|
||||
plt.subplot(4, 3, i + 1)
|
||||
plt.scatter(t, omega_b[i], color='k', marker='.')
|
||||
plt.scatter(t, measuredOmega[i], color=color, marker='.')
|
||||
plt.xlabel('angular velocity ' + label)
|
||||
|
||||
# plot acceleration in nav
|
||||
acceleration_n = self.scenario.acceleration_n(t)
|
||||
for i, (label, color) in enumerate(zip(self.labels, self.colors)):
|
||||
plt.subplot(4, 3, i + 4)
|
||||
plt.scatter(t, acceleration_n[i], color=color, marker='.')
|
||||
plt.xlabel('acceleration in nav ' + label)
|
||||
|
||||
# plot acceleration in body
|
||||
acceleration_b = self.scenario.acceleration_b(t)
|
||||
for i, (label, color) in enumerate(zip(self.labels, self.colors)):
|
||||
plt.subplot(4, 3, i + 7)
|
||||
plt.scatter(t, acceleration_b[i], color=color, marker='.')
|
||||
plt.xlabel('acceleration in body ' + label)
|
||||
|
||||
# plot actual specific force, as well as corrupted
|
||||
actual = self.runner.actualSpecificForce(t)
|
||||
for i, (label, color) in enumerate(zip(self.labels, self.colors)):
|
||||
plt.subplot(4, 3, i + 10)
|
||||
plt.scatter(t, actual[i], color='k', marker='.')
|
||||
plt.scatter(t, measuredAcc[i], color=color, marker='.')
|
||||
plt.xlabel('specific force ' + label)
|
||||
|
||||
def plotGroundTruthPose(self, t):
|
||||
# plot ground truth pose, as well as prediction from integrated IMU measurements
|
||||
actualPose = self.scenario.pose(t)
|
||||
plotPose3(POSES_FIG, actualPose, 0.3)
|
||||
t = actualPose.translation()
|
||||
self.maxDim = max([abs(t.x()), abs(t.y()), abs(t.z()), self.maxDim])
|
||||
ax = plt.gca()
|
||||
ax.set_xlim3d(-self.maxDim, self.maxDim)
|
||||
ax.set_ylim3d(-self.maxDim, self.maxDim)
|
||||
ax.set_zlim3d(-self.maxDim, self.maxDim)
|
||||
|
||||
plt.pause(0.01)
|
||||
|
||||
def run(self):
|
||||
# simulate the loop
|
||||
T = 12
|
||||
for i, t in enumerate(np.arange(0, T, self.dt)):
|
||||
measuredOmega = self.runner.measuredAngularVelocity(t)
|
||||
measuredAcc = self.runner.measuredSpecificForce(t)
|
||||
if i % 25 == 0:
|
||||
self.plotImu(t, measuredOmega, measuredAcc)
|
||||
self.plotGroundTruthPose(t)
|
||||
pim = self.runner.integrate(t, self.actualBias, True)
|
||||
predictedNavState = self.runner.predict(pim, self.actualBias)
|
||||
plotPose3(POSES_FIG, predictedNavState.pose(), 0.1)
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
if __name__ == '__main__':
|
||||
PreintegrationExample().run()
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
|
||||
# A structure-from-motion example with landmarks
|
||||
# - The landmarks form a 10 meter cube
|
||||
# - The robot rotates around the landmarks, always facing towards the cube
|
||||
|
||||
import gtsam
|
||||
import numpy as np
|
||||
|
||||
def createPoints():
|
||||
# Create the set of ground-truth landmarks
|
||||
points = [gtsam.Point3(10.0,10.0,10.0),
|
||||
gtsam.Point3(-10.0,10.0,10.0),
|
||||
gtsam.Point3(-10.0,-10.0,10.0),
|
||||
gtsam.Point3(10.0,-10.0,10.0),
|
||||
gtsam.Point3(10.0,10.0,-10.0),
|
||||
gtsam.Point3(-10.0,10.0,-10.0),
|
||||
gtsam.Point3(-10.0,-10.0,-10.0),
|
||||
gtsam.Point3(10.0,-10.0,-10.0)]
|
||||
return points
|
||||
|
||||
def createPoses():
|
||||
# Create the set of ground-truth poses
|
||||
radius = 30.0
|
||||
angles = np.linspace(0,2*np.pi,8,endpoint=False)
|
||||
up = gtsam.Point3(0,0,1)
|
||||
target = gtsam.Point3(0,0,0)
|
||||
poses = []
|
||||
for theta in angles:
|
||||
position = gtsam.Point3(radius*np.cos(theta), radius*np.sin(theta), 0.0)
|
||||
camera = gtsam.PinholeCameraCal3_S2.Lookat(position, target, up)
|
||||
poses.append(camera.pose())
|
||||
return poses
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
from mpl_toolkits.mplot3d import Axes3D
|
||||
import numpy as np
|
||||
import time # for sleep()
|
||||
|
||||
import gtsam
|
||||
from gtsam_examples import SFMdata
|
||||
import gtsam_utils
|
||||
|
||||
# shorthand symbols:
|
||||
X = lambda i: int(gtsam.Symbol('x', i))
|
||||
L = lambda j: int(gtsam.Symbol('l', j))
|
||||
|
||||
def visual_ISAM2_plot(poses, points, result):
|
||||
# VisualISAMPlot plots current state of ISAM2 object
|
||||
# Author: Ellon Paiva
|
||||
# Based on MATLAB version by: Duy Nguyen Ta and Frank Dellaert
|
||||
|
||||
# Declare an id for the figure
|
||||
fignum = 0
|
||||
|
||||
fig = plt.figure(fignum)
|
||||
ax = fig.gca(projection='3d')
|
||||
plt.cla()
|
||||
|
||||
# Plot points
|
||||
# Can't use data because current frame might not see all points
|
||||
# marginals = Marginals(isam.getFactorsUnsafe(), isam.calculateEstimate()) # TODO - this is slow
|
||||
# gtsam.plot3DPoints(result, [], marginals)
|
||||
gtsam_utils.plot3DPoints(fignum, result, 'rx')
|
||||
|
||||
# Plot cameras
|
||||
i = 0
|
||||
while result.exists(X(i)):
|
||||
pose_i = result.atPose3(X(i))
|
||||
gtsam_utils.plotPose3(fignum, pose_i, 10)
|
||||
i += 1
|
||||
|
||||
# draw
|
||||
ax.set_xlim3d(-40, 40)
|
||||
ax.set_ylim3d(-40, 40)
|
||||
ax.set_zlim3d(-40, 40)
|
||||
plt.pause(1)
|
||||
|
||||
def visual_ISAM2_example():
|
||||
plt.ion()
|
||||
|
||||
# Define the camera calibration parameters
|
||||
K = gtsam.Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0)
|
||||
|
||||
# Define the camera observation noise model
|
||||
measurementNoise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v
|
||||
|
||||
# Create the set of ground-truth landmarks
|
||||
points = SFMdata.createPoints()
|
||||
|
||||
# Create the set of ground-truth poses
|
||||
poses = SFMdata.createPoses()
|
||||
|
||||
# Create an iSAM2 object. Unlike iSAM1, which performs periodic batch steps to maintain proper linearization
|
||||
# and efficient variable ordering, iSAM2 performs partial relinearization/reordering at each step. A parameter
|
||||
# structure is available that allows the user to set various properties, such as the relinearization threshold
|
||||
# and type of linear solver. For this example, we we set the relinearization threshold small so the iSAM2 result
|
||||
# will approach the batch result.
|
||||
parameters = gtsam.ISAM2Params()
|
||||
parameters.relinearize_threshold = 0.01
|
||||
parameters.relinearize_skip = 1
|
||||
isam = gtsam.ISAM2(parameters)
|
||||
|
||||
# Create a Factor Graph and Values to hold the new data
|
||||
graph = gtsam.NonlinearFactorGraph()
|
||||
initialEstimate = gtsam.Values()
|
||||
|
||||
# Loop over the different poses, adding the observations to iSAM incrementally
|
||||
for i, pose in enumerate(poses):
|
||||
|
||||
# Add factors for each landmark observation
|
||||
for j, point in enumerate(points):
|
||||
camera = gtsam.PinholeCameraCal3_S2(pose, K)
|
||||
measurement = camera.project(point)
|
||||
graph.push_back(gtsam.GenericProjectionFactorCal3_S2(measurement, measurementNoise, X(i), L(j), K))
|
||||
|
||||
# Add an initial guess for the current pose
|
||||
# Intentionally initialize the variables off from the ground truth
|
||||
initialEstimate.insert(X(i), pose.compose(gtsam.Pose3(gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))
|
||||
|
||||
# If this is the first iteration, add a prior on the first pose to set the coordinate frame
|
||||
# and a prior on the first landmark to set the scale
|
||||
# Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
|
||||
# adding it to iSAM.
|
||||
if i == 0:
|
||||
# Add a prior on pose x0
|
||||
poseNoise = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 0.3, 0.1, 0.1, 0.1])) # 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
|
||||
graph.push_back(gtsam.PriorFactorPose3(X(0), poses[0], poseNoise))
|
||||
|
||||
# Add a prior on landmark l0
|
||||
pointNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
|
||||
graph.push_back(gtsam.PriorFactorPoint3(L(0), points[0], pointNoise)) # add directly to graph
|
||||
|
||||
# Add initial guesses to all observed landmarks
|
||||
# Intentionally initialize the variables off from the ground truth
|
||||
for j, point in enumerate(points):
|
||||
initialEstimate.insert(L(j), point + gtsam.Point3(-0.25, 0.20, 0.15))
|
||||
else:
|
||||
# Update iSAM with the new factors
|
||||
isam.update(graph, initialEstimate)
|
||||
# Each call to iSAM2 update(*) performs one iteration of the iterative nonlinear solver.
|
||||
# If accuracy is desired at the expense of time, update(*) can be called additional times
|
||||
# to perform multiple optimizer iterations every step.
|
||||
isam.update()
|
||||
currentEstimate = isam.calculate_estimate()
|
||||
print("****************************************************")
|
||||
print("Frame", i, ":")
|
||||
for j in range(i + 1):
|
||||
print(X(j), ":", currentEstimate.atPose3(X(j)))
|
||||
|
||||
for j in range(len(points)):
|
||||
print(L(j), ":", currentEstimate.atPoint3(L(j)))
|
||||
|
||||
visual_ISAM2_plot(poses, points, currentEstimate)
|
||||
|
||||
# Clear the factor graph and values for the next iteration
|
||||
graph.resize(0)
|
||||
initialEstimate.clear()
|
||||
|
||||
plt.ioff()
|
||||
plt.show()
|
||||
|
||||
if __name__ == '__main__':
|
||||
visual_ISAM2_example()
|
||||
|
|
@ -0,0 +1,2 @@
|
|||
from . import SFMdata
|
||||
from . import VisualISAM2Example
|
||||
|
|
@ -0,0 +1 @@
|
|||
|
||||
|
|
@ -0,0 +1,13 @@
|
|||
import unittest
|
||||
from gtsam import *
|
||||
|
||||
#https://docs.python.org/2/library/unittest.html
|
||||
class TestPoint2(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.point = Point2()
|
||||
|
||||
def test_constructor(self):
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
import math
|
||||
import unittest
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
class TestScenario(unittest.TestCase):
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
def test_loop(self):
|
||||
# Forward velocity 2m/s
|
||||
# Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
v = 2
|
||||
w = math.radians(6)
|
||||
W = np.array([0, -w, 0])
|
||||
V = np.array([v, 0, 0])
|
||||
scenario = gtsam.ConstantTwistScenario(W, V)
|
||||
|
||||
T = 30
|
||||
np.testing.assert_almost_equal(W, scenario.omega_b(T))
|
||||
np.testing.assert_almost_equal(V, scenario.velocity_b(T))
|
||||
np.testing.assert_almost_equal(np.cross(W, V), scenario.acceleration_b(T))
|
||||
|
||||
# R = v/w, so test if loop crests at 2*R
|
||||
R = v / w
|
||||
T30 = scenario.pose(T)
|
||||
np.testing.assert_almost_equal(np.array([-math.pi, 0, -math.pi]), T30.rotation().xyz())
|
||||
self.assert_(gtsam.Point3(0, 0, 2 * R).equals(T30.translation()))
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
import math
|
||||
import unittest
|
||||
import numpy as np
|
||||
|
||||
import gtsam
|
||||
|
||||
class TestScenarioRunner(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.g = 10 # simple gravity constant
|
||||
|
||||
def test_loop_runner(self):
|
||||
# Forward velocity 2m/s
|
||||
# Pitch up with angular velocity 6 degree/sec (negative in FLU)
|
||||
v = 2
|
||||
w = math.radians(6)
|
||||
W = np.array([0, -w, 0])
|
||||
V = np.array([v, 0, 0])
|
||||
scenario = gtsam.ConstantTwistScenario(W, V)
|
||||
|
||||
dt = 0.1
|
||||
params = gtsam.PreintegrationParams.MakeSharedU(self.g)
|
||||
runner = gtsam.ScenarioRunner(gtsam.ScenarioPointer(scenario), params, dt)
|
||||
|
||||
# Test specific force at time 0: a is pointing up
|
||||
t = 0.0
|
||||
a = w * v
|
||||
np.testing.assert_almost_equal(np.array([0, 0, a + self.g]), runner.actualSpecificForce(t))
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
|
@ -0,0 +1 @@
|
|||
from .plot import *
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
from mpl_toolkits.mplot3d import Axes3D as _Axes3D
|
||||
|
||||
def plotPoint3(fignum, point, linespec):
|
||||
fig = plt.figure(fignum)
|
||||
ax = fig.gca(projection='3d')
|
||||
ax.plot([point.x()], [point.y()], [point.z()], linespec)
|
||||
|
||||
|
||||
def plot3DPoints(fignum, values, linespec, marginals=None):
|
||||
# plot3DPoints: plots the Point3 objects in a Values, with optional covariances
|
||||
# Finds all the Point3 objects in the given Values object and plots them.
|
||||
# If a Marginals object is given, this function will also plot marginal
|
||||
# covariance ellipses for each point.
|
||||
|
||||
keys = values.keys()
|
||||
|
||||
# Plot points and covariance matrices
|
||||
for key in keys:
|
||||
try:
|
||||
p = values.atPoint3(key)
|
||||
# if haveMarginals
|
||||
# P = marginals.marginalCovariance(key);
|
||||
# gtsam.plotPoint3(p, linespec, P);
|
||||
# else
|
||||
plotPoint3(fignum, p, linespec)
|
||||
except RuntimeError:
|
||||
continue
|
||||
# I guess it's not a Point3
|
||||
|
||||
def plotPose3(fignum, pose, axisLength=0.1):
|
||||
# get figure object
|
||||
fig = plt.figure(fignum)
|
||||
ax = fig.gca(projection='3d')
|
||||
|
||||
# get rotation and translation (center)
|
||||
gRp = pose.rotation().matrix() # rotation from pose to global
|
||||
C = pose.translation().vector()
|
||||
|
||||
# draw the camera axes
|
||||
xAxis = C + gRp[:, 0] * axisLength
|
||||
L = np.append(C[np.newaxis], xAxis[np.newaxis], axis=0)
|
||||
ax.plot(L[:, 0], L[:, 1], L[:, 2], 'r-')
|
||||
|
||||
yAxis = C + gRp[:, 1] * axisLength
|
||||
L = np.append(C[np.newaxis], yAxis[np.newaxis], axis=0)
|
||||
ax.plot(L[:, 0], L[:, 1], L[:, 2], 'g-')
|
||||
|
||||
zAxis = C + gRp[:, 2] * axisLength
|
||||
L = np.append(C[np.newaxis], zAxis[np.newaxis], axis=0)
|
||||
ax.plot(L[:, 0], L[:, 1], L[:, 2], 'b-')
|
||||
|
||||
# # plot the covariance
|
||||
# if (nargin>2) && (~isempty(P))
|
||||
# pPp = P(4:6,4:6); % covariance matrix in pose coordinate frame
|
||||
# gPp = gRp*pPp*gRp'; % convert the covariance matrix to global coordinate frame
|
||||
# gtsam.covarianceEllipse3D(C,gPp);
|
||||
# end
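
For reference, a minimal usage sketch of the plotting helpers above (an illustration only; it assumes
gtsam and the gtsam_utils package are importable and that a matplotlib backend with a display is
available; the figure number and pose values are arbitrary):

    import matplotlib.pyplot as plt
    import gtsam
    from gtsam_utils import plotPose3, plotPoint3

    fignum = 0  # arbitrary figure id, reused by all helpers
    # Build a pose the same way the examples above do, then draw its axes and a reference point.
    pose = gtsam.Pose3(gtsam.Rot3.Rodrigues(0.0, 0.0, 0.0), gtsam.Point3(1.0, 2.0, 3.0))
    plotPose3(fignum, pose, 1.0)                           # RGB axes of length 1 at the pose
    plotPoint3(fignum, gtsam.Point3(0.0, 0.0, 0.0), 'rx')  # the origin as a red cross
    plt.show()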
|
|
@ -0,0 +1,30 @@
|
|||
# get subdirectories list
|
||||
subdirlist(SUBDIRS ${CMAKE_CURRENT_SOURCE_DIR})
|
||||
|
||||
# get the sources needed to compile gtsam python module
|
||||
set(gtsam_python_srcs "")
|
||||
foreach(subdir ${SUBDIRS})
|
||||
file(GLOB ${subdir}_src "${subdir}/*.cpp")
|
||||
list(APPEND gtsam_python_srcs ${${subdir}_src})
|
||||
endforeach()
|
||||
|
||||
# Create the library
|
||||
add_library(gtsam_python SHARED exportgtsam.cpp ${gtsam_python_srcs})
|
||||
set_target_properties(gtsam_python PROPERTIES
|
||||
OUTPUT_NAME gtsam_python
|
||||
SKIP_BUILD_RPATH TRUE
|
||||
CLEAN_DIRECT_OUTPUT 1
|
||||
)
|
||||
target_link_libraries(gtsam_python
|
||||
${Boost_PYTHON${BOOST_PYTHON_VERSION_SUFFIX_UPPERCASE}_LIBRARY}
|
||||
${PYTHON_LIBRARY} gtsam)
|
||||
|
||||
# Cause the library to be output in the correct directory.
|
||||
# TODO: Change below to work on different systems (currently works only with Linux)
|
||||
add_custom_command(
|
||||
OUTPUT ${CMAKE_BINARY_DIR}/python/gtsam/_libgtsam_python.so
|
||||
DEPENDS gtsam_python
|
||||
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:gtsam_python> ${CMAKE_BINARY_DIR}/python/gtsam/_libgtsam_python.so
|
||||
COMMENT "Copying extension module to python/gtsam/_libgtsam_python.so"
|
||||
)
|
||||
add_custom_target(copy_gtsam_python_module ALL DEPENDS ${CMAKE_BINARY_DIR}/python/gtsam/_libgtsam_python.so)
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps FastVector instances to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
#include <boost/python/suite/indexing/vector_indexing_suite.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/base/FastVector.h"
|
||||
#include "gtsam/base/types.h" // for Key definition
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
void exportFastVectors(){
|
||||
|
||||
typedef FastVector<Key> KeyVector;
|
||||
|
||||
class_<KeyVector>("KeyVector")
|
||||
.def(vector_indexing_suite<KeyVector>())
|
||||
;
|
||||
|
||||
}
|
||||
|
|
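The vector_indexing_suite gives KeyVector list-like behaviour from Python; a small sketch (import path assumed as above).

from gtsam import _libgtsam_python as gt   # assumed import path

kv = gt.KeyVector()
kv.append(1)          # append/len/indexing/iteration come from vector_indexing_suite
kv.append(2)
print(len(kv), kv[0], [k for k in kv])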
@ -0,0 +1,105 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief exports the python module
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
#include <boost/cstdint.hpp>
|
||||
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
// base
|
||||
void exportFastVectors();
|
||||
|
||||
// geometry
|
||||
void exportPoint2();
|
||||
void exportPoint3();
|
||||
void exportRot2();
|
||||
void exportRot3();
|
||||
void exportPose2();
|
||||
void exportPose3();
|
||||
void exportPinholeBaseK();
|
||||
void exportPinholeCamera();
|
||||
void exportCal3_S2();
|
||||
|
||||
// inference
|
||||
void exportSymbol();
|
||||
|
||||
// linear
|
||||
void exportNoiseModels();
|
||||
|
||||
// nonlinear
|
||||
void exportValues();
|
||||
void exportNonlinearFactor();
|
||||
void exportNonlinearFactorGraph();
|
||||
void exportLevenbergMarquardtOptimizer();
|
||||
void exportISAM2();
|
||||
|
||||
// slam
|
||||
void exportPriorFactors();
|
||||
void exportBetweenFactors();
|
||||
void exportGenericProjectionFactor();
|
||||
|
||||
// navigation
|
||||
void exportImuFactor();
|
||||
void exportScenario();
|
||||
|
||||
|
||||
// Utils (or Python wrapper specific functions)
|
||||
void registerNumpyEigenConversions();
|
||||
|
||||
//-----------------------------------//
|
||||
|
||||
BOOST_PYTHON_MODULE(_libgtsam_python){
|
||||
|
||||
// NOTE: We need to call import_array1() instead of import_array() to support both python 2
|
||||
// and 3. The reason is that BOOST_PYTHON_MODULE puts all its contents in a function
|
||||
// returning void, and import_array() is a macro that when expanded for python 3, adds
|
||||
// a 'return __null' statement to that function. For more info check files:
|
||||
// boost/python/module_init.hpp and numpy/__multiarray_api.h (bottom of the file).
|
||||
// Should be the first thing to be done
|
||||
import_array1();
|
||||
|
||||
registerNumpyEigenConversions();
|
||||
|
||||
exportFastVectors();
|
||||
|
||||
exportPoint2();
|
||||
exportPoint3();
|
||||
exportRot2();
|
||||
exportRot3();
|
||||
exportPose2();
|
||||
exportPose3();
|
||||
exportPinholeBaseK();
|
||||
exportPinholeCamera();
|
||||
exportCal3_S2();
|
||||
|
||||
exportSymbol();
|
||||
|
||||
exportNoiseModels();
|
||||
|
||||
exportValues();
|
||||
exportNonlinearFactor();
|
||||
exportNonlinearFactorGraph();
|
||||
exportLevenbergMarquardtOptimizer();
|
||||
exportISAM2();
|
||||
|
||||
exportPriorFactors();
|
||||
exportBetweenFactors();
|
||||
exportGenericProjectionFactor();
|
||||
|
||||
exportImuFactor();
|
||||
exportScenario();
|
||||
}
|
||||
|
|
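Once built, the registrations above can be inspected directly from Python; a quick check (import path assumed).

from gtsam import _libgtsam_python as gt

# Every export*() call above contributes one or more of these names.
print(sorted(n for n in dir(gt) if not n.startswith('_')))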
@ -0,0 +1,62 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Cal3_S2 class to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Cal3_S2.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Cal3_S2::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Cal3_S2::equals, 1, 2)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(uncalibrate_overloads, Cal3_S2::uncalibrate, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(calibrate_overloads, Cal3_S2::calibrate, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(between_overloads, Cal3_S2::between, 1, 3)
|
||||
|
||||
// Function pointers to disambiguate Cal3_S2::calibrate calls
|
||||
Point2 (Cal3_S2::*calibrate1)(const Point2 &, OptionalJacobian< 2, 5 > Dcal, OptionalJacobian< 2, 2 > Dp) const = &Cal3_S2::calibrate;
|
||||
Vector3 (Cal3_S2::*calibrate2)(const Vector3 &) const = &Cal3_S2::calibrate;
|
||||
|
||||
void exportCal3_S2(){
|
||||
|
||||
class_<Cal3_S2>("Cal3_S2", init<>())
|
||||
.def(init<double,double,double,double,double>())
|
||||
.def(init<const Vector &>())
|
||||
.def(init<double,int,int>())
|
||||
.def(init<std::string>())
|
||||
.def("print", &Cal3_S2::print, print_overloads(args("s")))
|
||||
.def("equals", &Cal3_S2::equals, equals_overloads(args("q","tol")))
|
||||
.def("fx",&Cal3_S2::fx)
|
||||
.def("fy",&Cal3_S2::fy)
|
||||
.def("skew",&Cal3_S2::skew)
|
||||
.def("px",&Cal3_S2::px)
|
||||
.def("py",&Cal3_S2::py)
|
||||
.def("principal_point",&Cal3_S2::principalPoint)
|
||||
.def("vector",&Cal3_S2::vector)
|
||||
.def("k",&Cal3_S2::K)
|
||||
.def("matrix",&Cal3_S2::matrix)
|
||||
.def("matrix_inverse",&Cal3_S2::matrix_inverse)
|
||||
.def("uncalibrate",&Cal3_S2::uncalibrate, uncalibrate_overloads())
|
||||
.def("calibrate",calibrate1, calibrate_overloads())
|
||||
.def("calibrate",calibrate2)
|
||||
.def("between",&Cal3_S2::between, between_overloads())
|
||||
;
|
||||
|
||||
}
|
||||
|
|
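A short round-trip sketch of the Cal3_S2 binding above; default-argument calls go through the *_overloads helpers, and the import path is an assumption.

from gtsam import _libgtsam_python as gt

K = gt.Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0)  # fx, fy, skew, u0, v0
p = gt.Point2(0.1, -0.2)                         # intrinsic (normalized) coordinates
u = K.uncalibrate(p)                             # to pixel coordinates
print(u.x(), u.y())
print(K.calibrate(u).vector())                   # back to roughly (0.1, -0.2)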
@ -0,0 +1,66 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps the PinholeBaseK class to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/PinholeCamera.h"
|
||||
#include "gtsam/geometry/Cal3_S2.h"
|
||||
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
typedef PinholeBaseK<Cal3_S2> PinholeBaseKCal3_S2;
|
||||
|
||||
// Wrapper around PinholeBaseK<Cal3_S2>, needed because it has the pure virtual method calibration()
|
||||
struct PinholeBaseKCal3_S2Callback : PinholeBaseKCal3_S2, wrapper<PinholeBaseKCal3_S2>
|
||||
{
|
||||
const Cal3_S2 & calibration () const {
|
||||
return this->get_override("calibration")();
|
||||
}
|
||||
};
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(project_overloads, PinholeBaseKCal3_S2::project, 2, 4)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(range_overloads, PinholeBaseKCal3_S2::range, 1, 3)
|
||||
|
||||
// Function pointers to disambiguate project() calls
|
||||
Point2 (PinholeBaseKCal3_S2::*project1) (const Point3 &pw) const = &PinholeBaseKCal3_S2::project;
|
||||
Point2 (PinholeBaseKCal3_S2::*project2) (const Point3 &pw, OptionalJacobian< 2, 6 > Dpose, OptionalJacobian< 2, 3 > Dpoint, OptionalJacobian< 2, FixedDimension<Cal3_S2>::value > Dcal) const = &PinholeBaseKCal3_S2::project;
|
||||
Point2 (PinholeBaseKCal3_S2::*project3) (const Unit3 &pw, OptionalJacobian< 2, 6 > Dpose, OptionalJacobian< 2, 2 > Dpoint, OptionalJacobian< 2, FixedDimension<Cal3_S2>::value > Dcal) const = &PinholeBaseKCal3_S2::project;
|
||||
|
||||
// Function pointers to disambiguate range() calls
|
||||
double (PinholeBaseKCal3_S2::*range1) (const Point3 &point, OptionalJacobian< 1, 6 > Dcamera, OptionalJacobian< 1, 3 > Dpoint) const = &PinholeBaseKCal3_S2::range;
|
||||
double (PinholeBaseKCal3_S2::*range2) (const Pose3 &pose, OptionalJacobian< 1, 6 > Dcamera, OptionalJacobian< 1, 6 > Dpose) const = &PinholeBaseKCal3_S2::range;
|
||||
double (PinholeBaseKCal3_S2::*range3) (const CalibratedCamera &camera, OptionalJacobian< 1, 6 > Dcamera, OptionalJacobian< 1, 6 > Dother) const = &PinholeBaseKCal3_S2::range;
|
||||
|
||||
void exportPinholeBaseK(){
|
||||
|
||||
class_<PinholeBaseKCal3_S2Callback, boost::noncopyable>("PinholeBaseKCal3_S2", no_init)
|
||||
.def("calibration", pure_virtual(&PinholeBaseKCal3_S2::calibration), return_value_policy<copy_const_reference>())
|
||||
.def("project", project1)
|
||||
.def("project", project2, project_overloads())
|
||||
.def("project", project3, project_overloads())
|
||||
.def("backproject", &PinholeBaseKCal3_S2::backproject)
|
||||
.def("backproject_point_at_infinity", &PinholeBaseKCal3_S2::backprojectPointAtInfinity)
|
||||
.def("range", range1, range_overloads())
|
||||
.def("range", range2, range_overloads())
|
||||
.def("range", range3, range_overloads())
|
||||
;
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps PinholeCamera classes to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/PinholeCamera.h"
|
||||
#include "gtsam/geometry/Cal3_S2.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
typedef PinholeBaseK<Cal3_S2> PinholeBaseKCal3_S2;
|
||||
typedef PinholeCamera<Cal3_S2> PinholeCameraCal3_S2;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, PinholeCameraCal3_S2::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, PinholeCameraCal3_S2::equals, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Lookat_overloads, PinholeCameraCal3_S2::Lookat, 3, 4)
|
||||
|
||||
void exportPinholeCamera(){
|
||||
|
||||
class_<PinholeCameraCal3_S2, bases<PinholeBaseKCal3_S2> >("PinholeCameraCal3_S2", init<>())
|
||||
.def(init<const Pose3 &>())
|
||||
.def(init<const Pose3 &, const Cal3_S2 &>())
|
||||
.def(init<const Vector &>())
|
||||
.def(init<const Vector &, const Vector &>())
|
||||
.def("print", &PinholeCameraCal3_S2::print, print_overloads(args("s")))
|
||||
.def("equals", &PinholeCameraCal3_S2::equals, equals_overloads(args("q","tol")))
|
||||
.def("pose", &PinholeCameraCal3_S2::pose, return_value_policy<copy_const_reference>())
|
||||
// We don't need to define calibration() here because it's already defined as virtual in the base class PinholeBaseKCal3_S2
|
||||
// .def("calibration", &PinholeCameraCal3_S2::calibration, return_value_policy<copy_const_reference>())
|
||||
.def("Lookat", &PinholeCameraCal3_S2::Lookat, Lookat_overloads())
|
||||
.staticmethod("Lookat")
|
||||
;
|
||||
|
||||
}
|
||||
|
|
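With both the base-class and camera bindings in place, projection could be exercised like this; the import path is assumed, and Lookat's calibration argument is left at its default through Lookat_overloads.

from gtsam import _libgtsam_python as gt

eye, target, up = gt.Point3(0., 0., -5.), gt.Point3(0., 0., 0.), gt.Point3(0., -1., 0.)
cam = gt.PinholeCameraCal3_S2.Lookat(eye, target, up)   # default Cal3_S2 calibration
uv = cam.project(gt.Point3(0.5, 0.5, 0.0))              # inherited from the PinholeBaseKCal3_S2 binding
print(uv.x(), uv.y())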
@ -0,0 +1,58 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Point2 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Point2.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Point2::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Point2::equals, 1, 2)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(compose_overloads, Point2::compose, 1, 3)
|
||||
|
||||
void exportPoint2(){
|
||||
|
||||
class_<Point2>("Point2", init<>())
|
||||
.def(init<double, double>())
|
||||
.def(init<const Vector2 &>())
|
||||
.def("identity", &Point2::identity)
|
||||
.def("dist", &Point2::dist)
|
||||
.def("distance", &Point2::distance)
|
||||
.def("equals", &Point2::equals, equals_overloads(args("q","tol")))
|
||||
.def("norm", &Point2::norm)
|
||||
.def("print", &Point2::print, print_overloads(args("s")))
|
||||
.def("unit", &Point2::unit)
|
||||
.def("vector", &Point2::vector)
|
||||
.def("x", &Point2::x)
|
||||
.def("y", &Point2::y)
|
||||
.def(self * other<double>()) // __mult__
|
||||
.def(other<double>() * self) // __mult__
|
||||
.def(self + self)
|
||||
.def(-self)
|
||||
.def(self - self)
|
||||
.def(self / other<double>())
|
||||
.def(self_ns::str(self))
|
||||
.def(repr(self))
|
||||
.def(self == self)
|
||||
;
|
||||
|
||||
}
|
||||
|
|
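The operator defs translate into the usual Python arithmetic; for example (import path assumed).

from gtsam import _libgtsam_python as gt

p, q = gt.Point2(1.0, 2.0), gt.Point2(3.0, 4.0)
s = p + q                        # from .def(self + self)
print(s.x(), s.y())
print((2.0 * (q - p)).vector())  # scalar products come from the other<double>() defs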
@ -0,0 +1,64 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Point3 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Point3.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Point3::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Point3::equals, 1, 2)
|
||||
|
||||
void exportPoint3(){
|
||||
|
||||
class_<Point3>("Point3")
|
||||
.def(init<>())
|
||||
.def(init<double,double,double>())
|
||||
.def(init<const Vector3 &>())
|
||||
.def("identity", &Point3::identity)
|
||||
.staticmethod("identity")
|
||||
.def("add", &Point3::add)
|
||||
.def("cross", &Point3::cross)
|
||||
.def("dist", &Point3::dist)
|
||||
.def("distance", &Point3::distance)
|
||||
.def("dot", &Point3::dot)
|
||||
.def("equals", &Point3::equals, equals_overloads(args("q","tol")))
|
||||
.def("norm", &Point3::norm)
|
||||
.def("normalize", &Point3::normalize)
|
||||
.def("print", &Point3::print, print_overloads(args("s")))
|
||||
.def("sub", &Point3::sub)
|
||||
.def("vector", &Point3::vector)
|
||||
.def("x", &Point3::x)
|
||||
.def("y", &Point3::y)
|
||||
.def("z", &Point3::z)
|
||||
.def(self * other<double>())
|
||||
.def(other<double>() * self)
|
||||
.def(self + self)
|
||||
.def(-self)
|
||||
.def(self - self)
|
||||
.def(self / other<double>())
|
||||
.def(self_ns::str(self))
|
||||
.def(repr(self))
|
||||
.def(self == self)
|
||||
;
|
||||
|
||||
}
|
||||
|
|
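Point3 behaves the same way; a quick sketch (import path assumed).

from gtsam import _libgtsam_python as gt

a, b = gt.Point3(1.0, 2.0, 3.0), gt.Point3(4.0, 5.0, 6.0)
c = a + b
print(c.x(), c.y(), c.z())       # 5.0 7.0 9.0
print((0.5 * (b - a)).vector())  # returned as a numpy vector via the Eigen converters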
@ -0,0 +1,93 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Pose2 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Pose2.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Pose2::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Pose2::equals, 1, 2)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(compose_overloads, Pose2::compose, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(between_overloads, Pose2::between, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(transform_to_overloads, Pose2::transform_to, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(transform_from_overloads, Pose2::transform_from, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(bearing_overloads, Pose2::bearing, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(range_overloads, Pose2::range, 1, 3)
|
||||
|
||||
// Manually wrap
|
||||
|
||||
void exportPose2(){
|
||||
|
||||
// double (Pose2::*range1)(const Pose2&, boost::optional<Matrix&>, boost::optional<Matrix&>) const
|
||||
// = &Pose2::range;
|
||||
// double (Pose2::*range2)(const Point2&, boost::optional<Matrix&>, boost::optional<Matrix&>) const
|
||||
// = &Pose2::range;
|
||||
|
||||
// Rot2 (Pose2::*bearing1)(const Pose2&, boost::optional<Matrix&>, boost::optional<Matrix&>) const
|
||||
// = &Pose2::bearing;
|
||||
// Rot2 (Pose2::*bearing2)(const Point2&, boost::optional<Matrix&>, boost::optional<Matrix&>) const
|
||||
// = &Pose2::bearing;
|
||||
|
||||
class_<Pose2>("Pose2", init<>())
|
||||
.def(init<Pose2>())
|
||||
.def(init<double, double, double>())
|
||||
.def(init<double, Point2>())
|
||||
.def("print", &Pose2::print, print_overloads(args("s")))
|
||||
|
||||
.def("equals", &Pose2::equals, equals_overloads(args("pose","tol")))
|
||||
// .def("inverse", &Pose2::inverse)
|
||||
// .def("compose", &Pose2::compose, compose_overloads(args("p2", "H1", "H2")))
|
||||
// .def("between", &Pose2::between, between_overloads(args("p2", "H1", "H2")))
|
||||
// .def("dim", &Pose2::dim)
|
||||
// .def("retract", &Pose2::retract)
|
||||
|
||||
.def("transform_to", &Pose2::transform_to,
|
||||
transform_to_overloads(args("point", "H1", "H2")))
|
||||
.def("transform_from", &Pose2::transform_from,
|
||||
transform_to_overloads(args("point", "H1", "H2")))
|
||||
|
||||
.def("x", &Pose2::x)
|
||||
.def("y", &Pose2::y)
|
||||
.def("theta", &Pose2::theta)
|
||||
// See documentation on call policy for more information
|
||||
// https://wiki.python.org/moin/boost.python/CallPolicy
|
||||
.def("t", &Pose2::t, return_value_policy<copy_const_reference>())
|
||||
.def("r", &Pose2::r, return_value_policy<copy_const_reference>())
|
||||
.def("translation", &Pose2::translation, return_value_policy<copy_const_reference>())
|
||||
.def("rotation", &Pose2::rotation, return_value_policy<copy_const_reference>())
|
||||
|
||||
// .def("bearing", bearing1, bearing_overloads())
|
||||
// .def("bearing", bearing2, bearing_overloads())
|
||||
|
||||
// Function overload example
|
||||
// .def("range", range1, range_overloads())
|
||||
// .def("range", range2, range_overloads())
|
||||
|
||||
|
||||
.def("Expmap", &Pose2::Expmap)
|
||||
.staticmethod("Expmap")
|
||||
|
||||
.def(self * self) // __mult__
|
||||
;
|
||||
|
||||
}
|
||||
|
|
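Sketch of the Pose2 binding in use; transform_to relies on transform_to_overloads, so the Jacobian arguments can be omitted (import path assumed).

from gtsam import _libgtsam_python as gt

wTb = gt.Pose2(1.0, 2.0, 0.0)     # x, y, theta
pw = gt.Point2(2.0, 3.0)
pb = wTb.transform_to(pw)         # world point expressed in the body frame
print(pb.x(), pb.y(), wTb.theta())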
@ -0,0 +1,92 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Pose3 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Pose3.h"
|
||||
#include "gtsam/geometry/Pose2.h"
|
||||
#include "gtsam/geometry/Point3.h"
|
||||
#include "gtsam/geometry/Rot3.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Pose3::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Pose3::equals, 1, 2)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(transform_to_overloads, Pose3::transform_to, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(transform_from_overloads, Pose3::transform_from, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(translation_overloads, Pose3::translation, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(compose_overloads, Pose3::compose, 2, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(between_overloads, Pose3::between, 2, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(bearing_overloads, Pose3::bearing, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(range_overloads, Pose3::range, 1, 3)
|
||||
|
||||
void exportPose3(){
|
||||
|
||||
// Function pointers to disambiguate transform_to() calls
|
||||
Point3 (Pose3::*transform_to1)(const Point3&, OptionalJacobian< 3, 6 >, OptionalJacobian< 3, 3 > ) const = &Pose3::transform_to;
|
||||
Pose3 (Pose3::*transform_to2)(const Pose3&) const = &Pose3::transform_to;
|
||||
// Function pointers to disambiguate compose() calls
|
||||
Pose3 (Pose3::*compose1)(const Pose3 &g) const = &Pose3::compose;
|
||||
Pose3 (Pose3::*compose2)(const Pose3 &g, typename Pose3::ChartJacobian, typename Pose3::ChartJacobian) const = &Pose3::compose;
|
||||
// Function pointers to disambiguate between() calls
|
||||
Pose3 (Pose3::*between1)(const Pose3 &g) const = &Pose3::between;
|
||||
Pose3 (Pose3::*between2)(const Pose3 &g, typename Pose3::ChartJacobian, typename Pose3::ChartJacobian) const = &Pose3::between;
|
||||
// Function pointers to disambiguate range() calls
|
||||
double (Pose3::*range1)(const Point3 &, OptionalJacobian<1,6>, OptionalJacobian<1,3>) const = &Pose3::range;
|
||||
double (Pose3::*range2)(const Pose3 &, OptionalJacobian<1,6>, OptionalJacobian<1,6>) const = &Pose3::range;
|
||||
|
||||
class_<Pose3>("Pose3")
|
||||
.def(init<>())
|
||||
.def(init<const Pose3 &>())
|
||||
.def(init<const Rot3 &,const Point3 &>())
|
||||
.def(init<const Rot3 &,const Vector3 &>())
|
||||
.def(init<const Pose2 &>())
|
||||
.def(init<const Matrix &>())
|
||||
.def("print", &Pose3::print, print_overloads(args("s")))
|
||||
.def("equals", &Pose3::equals, equals_overloads(args("pose","tol")))
|
||||
.def("identity", &Pose3::identity)
|
||||
.staticmethod("identity")
|
||||
.def("bearing", &Pose3::bearing)
|
||||
.def("matrix", &Pose3::matrix)
|
||||
.def("transform_from", &Pose3::transform_from,
|
||||
transform_from_overloads(args("point", "H1", "H2")))
|
||||
.def("transform_to", transform_to1,
|
||||
transform_to_overloads(args("point", "H1", "H2")))
|
||||
.def("transform_to", transform_to2)
|
||||
.def("x", &Pose3::x)
|
||||
.def("y", &Pose3::y)
|
||||
.def("z", &Pose3::z)
|
||||
.def("translation", &Pose3::translation,
|
||||
translation_overloads()[return_value_policy<copy_const_reference>()])
|
||||
.def("rotation", &Pose3::rotation, return_value_policy<copy_const_reference>())
|
||||
.def(self * self) // __mult__
|
||||
.def(self * other<Point3>()) // __mult__
|
||||
.def(self_ns::str(self)) // __str__
|
||||
.def(repr(self)) // __repr__
|
||||
.def("compose", compose1)
|
||||
.def("compose", compose2, compose_overloads())
|
||||
.def("between", between1)
|
||||
.def("between", between2, between_overloads())
|
||||
.def("range", range1, range_overloads())
|
||||
.def("range", range2, range_overloads())
|
||||
.def("bearing", &Pose3::bearing, bearing_overloads())
|
||||
;
|
||||
}
|
||||
|
|
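The one-argument compose/between pointers make composition natural from Python; a sketch (import path assumed).

from gtsam import _libgtsam_python as gt

wTa = gt.Pose3(gt.Rot3.RzRyRx(0.0, 0.0, 0.1), gt.Point3(1.0, 0.0, 0.0))
wTb = gt.Pose3()
aTb = wTa.between(wTb)                     # relative pose, Jacobians omitted
print(aTb.translation().vector())
print(wTa.compose(aTb).equals(wTb, 1e-9))  # True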
@ -0,0 +1,65 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Rot2 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Rot2.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Rot2::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Rot2::equals, 1, 2)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(compose_overloads, Rot2::compose, 1, 3)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(relativeBearing_overloads, Rot2::relativeBearing, 1, 3)
|
||||
|
||||
void exportRot2(){
|
||||
|
||||
class_<Rot2>("Rot2", init<>())
|
||||
.def(init<double>())
|
||||
.def("Expmap", &Rot2::Expmap)
|
||||
.staticmethod("Expmap")
|
||||
.def("Logmap", &Rot2::Logmap)
|
||||
.staticmethod("Logmap")
|
||||
.def("atan2", &Rot2::atan2)
|
||||
.staticmethod("atan2")
|
||||
.def("fromAngle", &Rot2::fromAngle)
|
||||
.staticmethod("fromAngle")
|
||||
.def("fromCosSin", &Rot2::fromCosSin)
|
||||
.staticmethod("fromCosSin")
|
||||
.def("fromDegrees", &Rot2::fromDegrees)
|
||||
.staticmethod("fromDegrees")
|
||||
.def("identity", &Rot2::identity)
|
||||
.staticmethod("identity")
|
||||
.def("relativeBearing", &Rot2::relativeBearing)
|
||||
.staticmethod("relativeBearing")
|
||||
.def("c", &Rot2::c)
|
||||
.def("degrees", &Rot2::degrees)
|
||||
.def("equals", &Rot2::equals, equals_overloads(args("q","tol")))
|
||||
.def("matrix", &Rot2::matrix)
|
||||
.def("print", &Rot2::print, print_overloads(args("s")))
|
||||
.def("rotate", &Rot2::rotate)
|
||||
.def("s", &Rot2::s)
|
||||
.def("theta", &Rot2::theta)
|
||||
.def("unrotate", &Rot2::unrotate)
|
||||
.def(self * self) // __mult__
|
||||
;
|
||||
|
||||
}
|
||||
|
|
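Rot2 usage sketch; only argument-free accessors and the exported * operator are used here (import path assumed).

from gtsam import _libgtsam_python as gt

R1 = gt.Rot2.fromDegrees(30.0)
R2 = gt.Rot2.fromDegrees(60.0)
print((R1 * R2).degrees())   # ~90.0
print(R1.matrix())           # 2x2 numpy array via the Eigen converters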
@ -0,0 +1,111 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Rot3 class to python
|
||||
* @author Andrew Melim
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/geometry/Rot3.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
static Rot3 Quaternion_0(const Vector4& q)
|
||||
{
|
||||
return Rot3::quaternion(q[0],q[1],q[2],q[3]);
|
||||
}
|
||||
|
||||
static Rot3 Quaternion_1(double w, double x, double y, double z)
|
||||
{
|
||||
return Rot3::quaternion(w,x,y,z);
|
||||
}
|
||||
|
||||
// Prototypes used to perform overloading
|
||||
// See: http://www.boost.org/doc/libs/1_59_0/libs/python/doc/tutorial/doc/html/python/functions.html
|
||||
gtsam::Rot3 (*AxisAngle_0)(const Vector3&, double) = &Rot3::AxisAngle;
|
||||
gtsam::Rot3 (*AxisAngle_1)(const gtsam::Point3&, double) = &Rot3::AxisAngle;
|
||||
gtsam::Rot3 (*Rodrigues_0)(const Vector3&) = &Rot3::Rodrigues;
|
||||
gtsam::Rot3 (*Rodrigues_1)(double, double, double) = &Rot3::Rodrigues;
|
||||
gtsam::Rot3 (*RzRyRx_0)(double, double, double) = &Rot3::RzRyRx;
|
||||
gtsam::Rot3 (*RzRyRx_1)(const Vector&) = &Rot3::RzRyRx;
|
||||
Vector (Rot3::*quaternion_0)() const = &Rot3::quaternion;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Rot3::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Rot3::equals, 1, 2)
|
||||
|
||||
void exportRot3(){
|
||||
|
||||
class_<Rot3>("Rot3")
|
||||
.def(init<Point3,Point3,Point3>())
|
||||
.def(init<double,double,double,double,double,double,double,double,double>())
|
||||
.def(init<const Matrix3 &>())
|
||||
.def(init<const Matrix &>())
|
||||
.def("Quaternion", Quaternion_0, arg("q"), "Creates a Rot3 from an array [w,x,y,z] representing a quaternion")
|
||||
.def("Quaternion", Quaternion_1, (arg("w"),arg("x"),arg("y"),arg("z")) )
|
||||
.staticmethod("Quaternion")
|
||||
.def("AxisAngle", AxisAngle_0)
|
||||
.def("AxisAngle", AxisAngle_1)
|
||||
.staticmethod("AxisAngle")
|
||||
.def("Expmap", &Rot3::Expmap)
|
||||
.staticmethod("Expmap")
|
||||
.def("ExpmapDerivative", &Rot3::ExpmapDerivative)
|
||||
.staticmethod("ExpmapDerivative")
|
||||
.def("Logmap", &Rot3::Logmap)
|
||||
.staticmethod("Logmap")
|
||||
.def("LogmapDerivative", &Rot3::LogmapDerivative)
|
||||
.staticmethod("LogmapDerivative")
|
||||
.def("Rodrigues", Rodrigues_0)
|
||||
.def("Rodrigues", Rodrigues_1)
|
||||
.staticmethod("Rodrigues")
|
||||
.def("Rx", &Rot3::Rx)
|
||||
.staticmethod("Rx")
|
||||
.def("Ry", &Rot3::Ry)
|
||||
.staticmethod("Ry")
|
||||
.def("Rz", &Rot3::Rz)
|
||||
.staticmethod("Rz")
|
||||
.def("RzRyRx", RzRyRx_0, (arg("x"),arg("y"),arg("z")), "Rotations around Z, Y, then X axes as in http://en.wikipedia.org/wiki/Rotation_matrix, counterclockwise when looking from unchanging axis" )
|
||||
.def("RzRyRx", RzRyRx_1, arg("xyz"), "Rotations around Z, Y, then X axes as in http://en.wikipedia.org/wiki/Rotation_matrix, counterclockwise when looking from unchanging axis" )
|
||||
.staticmethod("RzRyRx")
|
||||
.def("identity", &Rot3::identity)
|
||||
.staticmethod("identity")
|
||||
.def("AdjointMap", &Rot3::AdjointMap)
|
||||
.def("column", &Rot3::column)
|
||||
.def("conjugate", &Rot3::conjugate)
|
||||
.def("equals", &Rot3::equals, equals_overloads(args("q","tol")))
|
||||
#ifndef GTSAM_USE_QUATERNIONS
|
||||
.def("localCayley", &Rot3::localCayley)
|
||||
.def("retractCayley", &Rot3::retractCayley)
|
||||
#endif
|
||||
.def("matrix", &Rot3::matrix)
|
||||
.def("print", &Rot3::print, print_overloads(args("s")))
|
||||
.def("r1", &Rot3::r1)
|
||||
.def("r2", &Rot3::r2)
|
||||
.def("r3", &Rot3::r3)
|
||||
.def("rpy", &Rot3::rpy)
|
||||
.def("slerp", &Rot3::slerp)
|
||||
.def("transpose", &Rot3::transpose)
|
||||
.def("xyz", &Rot3::xyz)
|
||||
.def("quaternion", quaternion_0)
|
||||
.def(self * self)
|
||||
.def(self * other<Point3>())
|
||||
.def(self * other<Unit3>())
|
||||
.def(self_ns::str(self)) // __str__
|
||||
.def(repr(self)) // __repr__
|
||||
;
|
||||
|
||||
}
|
||||
|
|
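Rot3 sketch showing the two named constructors exposed above (import path assumed).

from gtsam import _libgtsam_python as gt

R = gt.Rot3.RzRyRx(0.1, 0.2, 0.3)
print(R.rpy())                               # roll/pitch/yaw recovered as a vector
Q = gt.Rot3.Quaternion(1.0, 0.0, 0.0, 0.0)   # identity rotation from w, x, y, z
print((R * Q).matrix())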
@ -0,0 +1,83 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps Symbol class to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include <sstream> // for stringstream
|
||||
|
||||
#include "gtsam/inference/Symbol.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Symbol::print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, Symbol::equals, 1, 2)
|
||||
|
||||
// Helper function to allow building a Symbol from a python string and an index.
|
||||
static boost::shared_ptr<Symbol> makeSymbol(const std::string &str, size_t j)
|
||||
{
|
||||
if(str.size() > 1)
|
||||
throw std::runtime_error("string argument must have one character only");
|
||||
|
||||
return boost::make_shared<Symbol>(str.at(0),j);
|
||||
}
|
||||
|
||||
// Helper function to print the symbol as "char-and-index" in python
|
||||
std::string selfToString(const Symbol & self)
|
||||
{
|
||||
return (std::string)self;
|
||||
}
|
||||
|
||||
// Helper function to convert a Symbol to int using int() cast in python
|
||||
size_t selfToKey(const Symbol & self)
|
||||
{
|
||||
return self.key();
|
||||
}
|
||||
|
||||
// Helper function to recover symbol's unsigned char as string
|
||||
std::string chrFromSelf(const Symbol & self)
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << self.chr();
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
void exportSymbol(){
|
||||
|
||||
class_<Symbol, boost::shared_ptr<Symbol> >("Symbol")
|
||||
.def(init<>())
|
||||
.def(init<const Symbol &>())
|
||||
.def("__init__", make_constructor(makeSymbol))
|
||||
.def(init<Key>())
|
||||
.def("print", &Symbol::print, print_overloads(args("s")))
|
||||
.def("equals", &Symbol::equals, equals_overloads(args("q","tol")))
|
||||
.def("key", &Symbol::key)
|
||||
.def("index", &Symbol::index)
|
||||
.def(self < self)
|
||||
.def(self == self)
|
||||
.def(self == other<Key>())
|
||||
.def(self != self)
|
||||
.def(self != other<Key>())
|
||||
.def("__repr__", &selfToString)
|
||||
.def("__int__", &selfToKey)
|
||||
.def("chr", &chrFromSelf)
|
||||
;
|
||||
|
||||
}
|
||||
|
|
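The makeSymbol/__int__/__repr__ helpers give Symbol a fairly pythonic feel; for instance (import path assumed).

from gtsam import _libgtsam_python as gt

x0 = gt.Symbol('x', 0)             # goes through the makeSymbol helper above
print(int(x0), x0.index(), x0.chr())
print(repr(x0))                    # the "char-and-index" form, e.g. x0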
@ -0,0 +1,137 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps the noise model classes into the noiseModel module
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
/** TODOs Summary:
|
||||
*
|
||||
* TODO(Ellon): Don't know yet if it's worth/needed to add 'Wrap structs' for each of the noise models.
|
||||
* I think it's only worthwhile if we want to access the virtual functions from python.
|
||||
* TODO(Ellon): Wrap non-pure virtual methods of Base on BaseWrap
|
||||
*/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/linear/NoiseModel.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
using namespace gtsam::noiseModel;
|
||||
|
||||
// Wrap around pure virtual class Base.
|
||||
// All pure virtual methods should be wrapped. Non-pure virtual methods may be wrapped if we want to mimic
|
||||
// overloading through inheritance in Python.
|
||||
// See: http://www.boost.org/doc/libs/1_59_0/libs/python/doc/tutorial/doc/html/python/exposing.html#python.class_virtual_functions
|
||||
struct BaseCallback : Base, wrapper<Base>
|
||||
{
|
||||
void print (const std::string & name="") const {
|
||||
this->get_override("print")();
|
||||
}
|
||||
bool equals (const Base & expected, double tol=1e-9) const {
|
||||
return this->get_override("equals")();
|
||||
}
|
||||
Vector whiten (const Vector & v) const {
|
||||
return this->get_override("whiten")();
|
||||
}
|
||||
Matrix Whiten (const Matrix & v) const {
|
||||
return this->get_override("Whiten")();
|
||||
}
|
||||
Vector unwhiten (const Vector & v) const {
|
||||
return this->get_override("unwhiten")();
|
||||
}
|
||||
double distance (const Vector & v) const {
|
||||
return this->get_override("distance")();
|
||||
}
|
||||
void WhitenSystem (std::vector< Matrix > &A, Vector &b) const {
|
||||
this->get_override("WhitenSystem")();
|
||||
}
|
||||
void WhitenSystem (Matrix &A, Vector &b) const {
|
||||
this->get_override("WhitenSystem")();
|
||||
}
|
||||
void WhitenSystem (Matrix &A1, Matrix &A2, Vector &b) const {
|
||||
this->get_override("WhitenSystem")();
|
||||
}
|
||||
void WhitenSystem (Matrix &A1, Matrix &A2, Matrix &A3, Vector &b) const {
|
||||
this->get_override("WhitenSystem")();
|
||||
}
|
||||
|
||||
// TODO(Ellon): Wrappers for the non-pure virtual methods should go here.
|
||||
// See: http://www.boost.org/doc/libs/1_59_0/libs/python/doc/tutorial/doc/html/python/exposing.html#python.virtual_functions_with_default_implementations
|
||||
|
||||
};
|
||||
|
||||
// Overloads for named constructors. Named constructors are static, so we declare them
|
||||
// using BOOST_PYTHON_FUNCTION_OVERLOADS instead of BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS
|
||||
// See: http://www.boost.org/doc/libs/1_59_0/libs/python/doc/tutorial/doc/html/python/functions.html#python.default_arguments
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Gaussian_SqrtInformation_overloads, Gaussian::SqrtInformation, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Gaussian_Information_overloads, Gaussian::Information, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Gaussian_Covariance_overloads, Gaussian::Covariance, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Diagonal_Sigmas_overloads, Diagonal::Sigmas, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Diagonal_Variances_overloads, Diagonal::Variances, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Diagonal_Precisions_overloads, Diagonal::Precisions, 1, 2)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Isotropic_Sigma_overloads, Isotropic::Sigma, 2, 3)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Isotropic_Variance_overloads, Isotropic::Variance, 2, 3)
|
||||
BOOST_PYTHON_FUNCTION_OVERLOADS(Isotropic_Precision_overloads, Isotropic::Precision, 2, 3)
|
||||
|
||||
|
||||
void exportNoiseModels(){
|
||||
|
||||
// Create a scope "noiseModel". See: http://isolation-nation.blogspot.fr/2008/09/packages-in-python-extension-modules.html
|
||||
std::string noiseModel_name = extract<std::string>(scope().attr("__name__") + ".noiseModel");
|
||||
object noiseModel_module(handle<>(borrowed(PyImport_AddModule(noiseModel_name.c_str()))));
|
||||
scope().attr("noiseModel") = noiseModel_module;
|
||||
scope noiseModel_scope = noiseModel_module;
|
||||
|
||||
// Then export our classes in the noiseModel scope
|
||||
class_<BaseCallback,boost::noncopyable>("Base")
|
||||
.def("print", pure_virtual(&Base::print))
|
||||
;
|
||||
|
||||
// NOTE: We should use "Base" in "bases<...>", and not "BaseCallback" (it was not clear at the beginning)
|
||||
class_<Gaussian, boost::shared_ptr<Gaussian>, bases<Base> >("Gaussian", no_init)
|
||||
.def("SqrtInformation",&Gaussian::SqrtInformation, Gaussian_SqrtInformation_overloads())
|
||||
.staticmethod("SqrtInformation")
|
||||
.def("Information",&Gaussian::Information, Gaussian_Information_overloads())
|
||||
.staticmethod("Information")
|
||||
.def("Covariance",&Gaussian::Covariance, Gaussian_Covariance_overloads())
|
||||
.staticmethod("Covariance")
|
||||
;
|
||||
|
||||
class_<Diagonal, boost::shared_ptr<Diagonal>, bases<Gaussian> >("Diagonal", no_init)
|
||||
.def("Sigmas",&Diagonal::Sigmas, Diagonal_Sigmas_overloads())
|
||||
.staticmethod("Sigmas")
|
||||
.def("Variances",&Diagonal::Variances, Diagonal_Variances_overloads())
|
||||
.staticmethod("Variances")
|
||||
.def("Precisions",&Diagonal::Precisions, Diagonal_Precisions_overloads())
|
||||
.staticmethod("Precisions")
|
||||
;
|
||||
|
||||
class_<Isotropic, boost::shared_ptr<Isotropic>, bases<Diagonal> >("Isotropic", no_init)
|
||||
.def("Sigma",&Isotropic::Sigma, Isotropic_Sigma_overloads())
|
||||
.staticmethod("Sigma")
|
||||
.def("Variance",&Isotropic::Variance, Isotropic_Variance_overloads())
|
||||
.staticmethod("Variance")
|
||||
.def("Precision",&Isotropic::Precision, Isotropic_Precision_overloads())
|
||||
.staticmethod("Precision")
|
||||
;
|
||||
|
||||
class_<Unit, boost::shared_ptr<Unit>, bases<Isotropic> >("Unit", no_init)
|
||||
.def("Create",&Unit::Create)
|
||||
.staticmethod("Create")
|
||||
;
|
||||
|
||||
}
|
||||
|
|
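The noiseModel scope created above is reachable as an attribute of the module; a sketch of the named constructors (import path assumed; numpy arrays convert to Eigen vectors through the registered converters).

import numpy as np
from gtsam import _libgtsam_python as gt

prior_noise = gt.noiseModel.Diagonal.Sigmas(np.array([0.1, 0.1, 0.05]))
pixel_noise = gt.noiseModel.Isotropic.Sigma(2, 1.0)   # dim, sigma
print(type(prior_noise).__name__, type(pixel_noise).__name__)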
@ -0,0 +1,94 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps ImuFactor and related navigation classes to python
|
||||
* @author Frank Dellaert
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/navigation/ImuFactor.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
typedef gtsam::OptionalJacobian<3, 9> OptionalJacobian39;
|
||||
typedef gtsam::OptionalJacobian<9, 6> OptionalJacobian96;
|
||||
typedef gtsam::OptionalJacobian<9, 9> OptionalJacobian9;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, print, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(attitude_overloads, attitude, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(position_overloads, position, 0, 1)
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(velocity_overloads, velocity, 0, 1)
|
||||
|
||||
void exportImuFactor() {
|
||||
class_<OptionalJacobian39>("OptionalJacobian39", init<>());
|
||||
class_<OptionalJacobian96>("OptionalJacobian96", init<>());
|
||||
class_<OptionalJacobian9>("OptionalJacobian9", init<>());
|
||||
|
||||
class_<NavState>("NavState", init<>())
|
||||
// TODO(frank): overload with jacobians
|
||||
.def("print", &NavState::print, print_overloads())
|
||||
.def("attitude", &NavState::attitude,
|
||||
attitude_overloads()[return_value_policy<copy_const_reference>()])
|
||||
.def("position", &NavState::position,
|
||||
position_overloads()[return_value_policy<copy_const_reference>()])
|
||||
.def("velocity", &NavState::velocity,
|
||||
velocity_overloads()[return_value_policy<copy_const_reference>()])
|
||||
.def(repr(self))
|
||||
.def("pose", &NavState::pose);
|
||||
|
||||
class_<imuBias::ConstantBias>("ConstantBias", init<>())
|
||||
.def(init<const Vector3&, const Vector3&>())
|
||||
.def(repr(self));
|
||||
|
||||
class_<PreintegrationParams, boost::shared_ptr<PreintegrationParams>>(
|
||||
"PreintegrationParams", init<const Vector3&>())
|
||||
.def_readwrite("gyroscopeCovariance",
|
||||
&PreintegrationParams::gyroscopeCovariance)
|
||||
.def_readwrite("omegaCoriolis", &PreintegrationParams::omegaCoriolis)
|
||||
.def_readwrite("body_P_sensor", &PreintegrationParams::body_P_sensor)
|
||||
.def_readwrite("accelerometerCovariance",
|
||||
&PreintegrationParams::accelerometerCovariance)
|
||||
.def_readwrite("integrationCovariance",
|
||||
&PreintegrationParams::integrationCovariance)
|
||||
.def_readwrite("use2ndOrderCoriolis",
|
||||
&PreintegrationParams::use2ndOrderCoriolis)
|
||||
.def_readwrite("n_gravity", &PreintegrationParams::n_gravity)
|
||||
|
||||
.def("MakeSharedD", &PreintegrationParams::MakeSharedD)
|
||||
.staticmethod("MakeSharedD")
|
||||
.def("MakeSharedU", &PreintegrationParams::MakeSharedU)
|
||||
.staticmethod("MakeSharedU");
|
||||
|
||||
class_<PreintegratedImuMeasurements>(
|
||||
"PreintegratedImuMeasurements",
|
||||
init<const boost::shared_ptr<PreintegrationParams>&,
|
||||
const imuBias::ConstantBias&>())
|
||||
.def(repr(self))
|
||||
.def("predict", &PreintegratedImuMeasurements::predict)
|
||||
.def("computeError", &PreintegratedImuMeasurements::computeError)
|
||||
.def("resetIntegration", &PreintegratedImuMeasurements::resetIntegration)
|
||||
.def("integrateMeasurement",
|
||||
&PreintegratedImuMeasurements::integrateMeasurement)
|
||||
.def("preintMeasCov", &PreintegratedImuMeasurements::preintMeasCov);
|
||||
|
||||
// NOTE(frank): Abstract classes need boost::noncopyable
|
||||
class_<ImuFactor, bases<NonlinearFactor>, boost::shared_ptr<ImuFactor>>(
|
||||
"ImuFactor")
|
||||
.def("error", &ImuFactor::error)
|
||||
.def(init<Key, Key, Key, Key, Key, const PreintegratedImuMeasurements&>())
|
||||
.def(repr(self));
|
||||
}
|
||||
|
|
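A rough preintegration sketch against the bindings above; the measurement values are placeholders and the five integer keys stand in for the pose_i, vel_i, pose_j, vel_j and bias keys (import path assumed).

import numpy as np
from gtsam import _libgtsam_python as gt

params = gt.PreintegrationParams.MakeSharedU(9.81)      # gravity magnitude, Z-up convention
pim = gt.PreintegratedImuMeasurements(params, gt.ConstantBias())
for _ in range(10):
    pim.integrateMeasurement(np.array([0.0, 0.0, -9.81]),  # measured acceleration
                             np.array([0.0, 0.0, 0.0]),    # measured angular rate
                             0.01)                         # dt
print(pim.preintMeasCov())                              # 9x9 preintegration covariance
factor = gt.ImuFactor(0, 1, 2, 3, 4, pim)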
@ -0,0 +1,64 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief wraps ConstantTwistScenario class to python
|
||||
* @author Frank Dellaert
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/navigation/ScenarioRunner.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
// Create const Scenario pointer from ConstantTwistScenario
|
||||
static const Scenario* ScenarioPointer(const ConstantTwistScenario& scenario) {
|
||||
return static_cast<const Scenario*>(&scenario);
|
||||
}
|
||||
|
||||
void exportScenario() {
|
||||
// NOTE(frank): Abstract classes need boost::noncopyable
|
||||
class_<Scenario, boost::noncopyable>("Scenario", no_init);
|
||||
|
||||
// TODO(frank): figure out how to do inheritance
|
||||
class_<ConstantTwistScenario>("ConstantTwistScenario",
|
||||
init<const Vector3&, const Vector3&>())
|
||||
.def("pose", &Scenario::pose)
|
||||
.def("omega_b", &Scenario::omega_b)
|
||||
.def("velocity_n", &Scenario::velocity_n)
|
||||
.def("acceleration_n", &Scenario::acceleration_n)
|
||||
.def("navState", &Scenario::navState)
|
||||
.def("rotation", &Scenario::rotation)
|
||||
.def("velocity_b", &Scenario::velocity_b)
|
||||
.def("acceleration_b", &Scenario::acceleration_b);
|
||||
|
||||
// NOTE(frank): https://wiki.python.org/moin/boost.python/CallPolicy
|
||||
def("ScenarioPointer", &ScenarioPointer,
|
||||
return_value_policy<reference_existing_object>());
|
||||
|
||||
class_<ScenarioRunner>(
|
||||
"ScenarioRunner",
|
||||
init<const Scenario*, const boost::shared_ptr<PreintegrationParams>&,
|
||||
double, const imuBias::ConstantBias&>())
|
||||
.def("actualSpecificForce", &ScenarioRunner::actualSpecificForce)
|
||||
.def("measuredAngularVelocity", &ScenarioRunner::measuredAngularVelocity)
|
||||
.def("measuredSpecificForce", &ScenarioRunner::measuredSpecificForce)
|
||||
.def("imuSampleTime", &ScenarioRunner::imuSampleTime,
|
||||
return_value_policy<copy_const_reference>())
|
||||
.def("integrate", &ScenarioRunner::integrate)
|
||||
.def("predict", &ScenarioRunner::predict)
|
||||
.def("estimateCovariance", &ScenarioRunner::estimateCovariance);
|
||||
}
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief exports ISAM2 class to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/nonlinear/ISAM2.h"
|
||||
#include "gtsam/geometry/Pose3.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(update_overloads, ISAM2::update, 0, 7)
|
||||
|
||||
void exportISAM2(){
|
||||
|
||||
// TODO(Ellon): Export all properties of ISAM2Params
|
||||
class_<ISAM2Params>("ISAM2Params")
|
||||
.add_property("relinearize_skip", &ISAM2Params::getRelinearizeSkip, &ISAM2Params::setRelinearizeSkip)
|
||||
.add_property("enable_relinearization", &ISAM2Params::isEnableRelinearization, &ISAM2Params::setEnableRelinearization)
|
||||
.add_property("evaluate_non_linear_error", &ISAM2Params::isEvaluateNonlinearError, &ISAM2Params::setEvaluateNonlinearError)
|
||||
.add_property("factorization", &ISAM2Params::getFactorization, &ISAM2Params::setFactorization)
|
||||
.add_property("cache_linearized_factors", &ISAM2Params::isCacheLinearizedFactors, &ISAM2Params::setCacheLinearizedFactors)
|
||||
.add_property("enable_detailed_results", &ISAM2Params::isEnableDetailedResults, &ISAM2Params::setEnableDetailedResults)
|
||||
.add_property("enable_partial_linearization_check", &ISAM2Params::isEnablePartialRelinearizationCheck, &ISAM2Params::setEnablePartialRelinearizationCheck)
|
||||
// TODO(Ellon): Check if it works with FastMap; Implement properly if it doesn't.
|
||||
.add_property("relinearization_threshold", &ISAM2Params::getRelinearizeThreshold, &ISAM2Params::setRelinearizeThreshold)
|
||||
// TODO(Ellon): Wrap the following setters/getters:
|
||||
// void setOptimizationParams (OptimizationParams optimizationParams)
|
||||
// OptimizationParams getOptimizationParams () const
|
||||
// void setKeyFormatter (KeyFormatter keyFormatter)
|
||||
// KeyFormatter getKeyFormatter () const
|
||||
// GaussianFactorGraph::Eliminate getEliminationFunction () const
|
||||
;
|
||||
|
||||
// TODO(Ellon): Export useful methods/properties of ISAM2Result
|
||||
class_<ISAM2Result>("ISAM2Result")
|
||||
;
|
||||
|
||||
// Function pointers for overloads in ISAM2
|
||||
Values (ISAM2::*calculateEstimate_0)() const = &ISAM2::calculateEstimate;
|
||||
|
||||
class_<ISAM2>("ISAM2")
|
||||
.def(init<const ISAM2Params &>())
|
||||
// TODO(Ellon): wrap all optional values of update
|
||||
.def("update",&ISAM2::update, update_overloads())
|
||||
.def("calculate_estimate", calculateEstimate_0)
|
||||
.def("calculate_pose3_estimate", &ISAM2::calculateEstimate<Pose3>, (arg("self"), arg("key")) )
|
||||
.def("value_exists", &ISAM2::valueExists)
|
||||
;
|
||||
|
||||
}
|
||||
|
|
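Sketch of driving ISAM2 through the properties and update_overloads exported above (import path assumed).

from gtsam import _libgtsam_python as gt

params = gt.ISAM2Params()
params.relinearize_skip = 1
params.enable_relinearization = True
isam = gt.ISAM2(params)

graph, values = gt.NonlinearFactorGraph(), gt.Values()
result = isam.update(graph, values)      # remaining update() arguments take defaults
print(isam.calculate_estimate().size())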
@ -0,0 +1,27 @@
|
|||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
void exportLevenbergMarquardtOptimizer(){
|
||||
class_<LevenbergMarquardtParams>("LevenbergMarquardtParams", init<>())
|
||||
.def("setDiagonalDamping", &LevenbergMarquardtParams::setDiagonalDamping)
|
||||
.def("setlambdaFactor", &LevenbergMarquardtParams::setlambdaFactor)
|
||||
.def("setlambdaInitial", &LevenbergMarquardtParams::setlambdaInitial)
|
||||
.def("setlambdaLowerBound", &LevenbergMarquardtParams::setlambdaLowerBound)
|
||||
.def("setlambdaUpperBound", &LevenbergMarquardtParams::setlambdaUpperBound)
|
||||
.def("setLogFile", &LevenbergMarquardtParams::setLogFile)
|
||||
.def("setUseFixedLambdaFactor", &LevenbergMarquardtParams::setUseFixedLambdaFactor)
|
||||
.def("setVerbosityLM", &LevenbergMarquardtParams::setVerbosityLM)
|
||||
;
|
||||
|
||||
class_<LevenbergMarquardtOptimizer>("LevenbergMarquardtOptimizer",
|
||||
init<const NonlinearFactorGraph&, const Values&, const LevenbergMarquardtParams&>())
|
||||
.def("optimize", &LevenbergMarquardtOptimizer::optimize, return_value_policy<copy_const_reference>())
|
||||
;
|
||||
}
|
||||
|
|
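And a matching optimizer sketch; a real problem would first push factors into the graph through the prior/between factor wrappers exported elsewhere in this patch (import path assumed).

from gtsam import _libgtsam_python as gt

graph, initial = gt.NonlinearFactorGraph(), gt.Values()
params = gt.LevenbergMarquardtParams()
params.setlambdaInitial(1e-5)
optimizer = gt.LevenbergMarquardtOptimizer(graph, initial, params)
result = optimizer.optimize()            # returns a Values copy via copy_const_reference
print(result.size())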
@ -0,0 +1,49 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @brief exports virtual class NonlinearFactor to python
|
||||
* @author Ellon Paiva Mendes (LAAS-CNRS)
|
||||
**/
|
||||
|
||||
#include <boost/python.hpp>
|
||||
|
||||
#define NO_IMPORT_ARRAY
|
||||
#include <numpy_eigen/NumpyEigenConverter.hpp>
|
||||
|
||||
#include "gtsam/nonlinear/NonlinearFactor.h"
|
||||
|
||||
using namespace boost::python;
|
||||
using namespace gtsam;
|
||||
|
||||
// Wrap around pure virtual class NonlinearFactor.
|
||||
// All pure virtual methods should be wrapped. Non-pure virtual methods may be wrapped if we want to mimic
|
||||
// overloading through inheritance in Python.
|
||||
// See: http://www.boost.org/doc/libs/1_59_0/libs/python/doc/tutorial/doc/html/python/exposing.html#python.class_virtual_functions
|
||||
struct NonlinearFactorCallback : NonlinearFactor, wrapper<NonlinearFactor>
|
||||
{
|
||||
double error (const Values & values) const {
|
||||
return this->get_override("error")(values);
|
||||
}
|
||||
size_t dim () const {
|
||||
return this->get_override("dim")();
|
||||
}
|
||||
boost::shared_ptr<GaussianFactor> linearize(const Values & values) const {
|
||||
return this->get_override("linearize")(values);
|
||||
}
|
||||
};
|
||||
|
||||
void exportNonlinearFactor(){
|
||||
|
||||
class_<NonlinearFactorCallback,boost::noncopyable>("NonlinearFactor")
|
||||
;
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,56 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief exports NonlinearFactorGraph class to python
 * @author Andrew Melim
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include <stdexcept>

#include "gtsam/nonlinear/NonlinearFactorGraph.h"
#include "gtsam/nonlinear/NonlinearFactor.h"

using namespace boost::python;
using namespace gtsam;

BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, NonlinearFactorGraph::print, 0, 1);

boost::shared_ptr<NonlinearFactor> getNonlinearFactor(
    const NonlinearFactorGraph& graph, size_t idx) {
  auto p = boost::dynamic_pointer_cast<NonlinearFactor>(graph.at(idx));
  if (!p) throw std::runtime_error("No NonlinearFactor at requested index");
  return p;
}

void exportNonlinearFactorGraph(){

  typedef NonlinearFactorGraph::sharedFactor sharedFactor;

  void (NonlinearFactorGraph::*push_back1)(const sharedFactor&) = &NonlinearFactorGraph::push_back;
  void (NonlinearFactorGraph::*add1)(const sharedFactor&) = &NonlinearFactorGraph::add;

  class_<NonlinearFactorGraph>("NonlinearFactorGraph", init<>())
    .def("size", &NonlinearFactorGraph::size)
    .def("push_back", push_back1)
    .def("add", add1)
    .def("resize", &NonlinearFactorGraph::resize)
    .def("empty", &NonlinearFactorGraph::empty)
    .def("print", &NonlinearFactorGraph::print, print_overloads(args("s")))
  ;

  def("getNonlinearFactor", getNonlinearFactor);

}
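
The push_back1 and add1 function-pointer variables exist only to pick one overload of the heavily overloaded push_back/add members so boost::python knows which signature to bind. The same selection can be written inline with static_cast; a minimal sketch for comparison only (same behavior, assumes the includes and using directives of the file above; exportNonlinearFactorGraphAlt is a hypothetical name):

// Sketch only: overload selection written inline instead of through a
// named function-pointer variable.
void exportNonlinearFactorGraphAlt() {
  class_<NonlinearFactorGraph>("NonlinearFactorGraph", init<>())
    .def("push_back",
         static_cast<void (NonlinearFactorGraph::*)(
             const NonlinearFactorGraph::sharedFactor&)>(
             &NonlinearFactorGraph::push_back))
  ;
}
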
@ -0,0 +1,80 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief wraps Values class to python
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include "gtsam/nonlinear/Values.h"
#include "gtsam/geometry/Point2.h"
#include "gtsam/geometry/Rot2.h"
#include "gtsam/geometry/Pose2.h"
#include "gtsam/geometry/Point3.h"
#include "gtsam/geometry/Rot3.h"
#include "gtsam/geometry/Pose3.h"
#include "gtsam/navigation/ImuBias.h"

using namespace boost::python;
using namespace gtsam;

BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, Values::print, 0, 1);

void exportValues(){

  typedef imuBias::ConstantBias Bias;

  bool (Values::*exists1)(Key) const = &Values::exists;
  void (Values::*insert_point2)(Key, const gtsam::Point2&) = &Values::insert;
  void (Values::*insert_rot2)   (Key, const gtsam::Rot2&) = &Values::insert;
  void (Values::*insert_pose2)  (Key, const gtsam::Pose2&) = &Values::insert;
  void (Values::*insert_point3) (Key, const gtsam::Point3&) = &Values::insert;
  void (Values::*insert_rot3)   (Key, const gtsam::Rot3&) = &Values::insert;
  void (Values::*insert_pose3)  (Key, const gtsam::Pose3&) = &Values::insert;
  void (Values::*insert_bias)   (Key, const Bias&) = &Values::insert;
  void (Values::*insert_vector3)(Key, const gtsam::Vector3&) = &Values::insert;

  class_<Values>("Values", init<>())
    .def(init<Values>())
    .def("clear", &Values::clear)
    .def("dim", &Values::dim)
    .def("empty", &Values::empty)
    .def("equals", &Values::equals)
    .def("erase", &Values::erase)
    .def("insert_fixed", &Values::insertFixed)
    .def("print", &Values::print, print_overloads(args("s")))
    .def("size", &Values::size)
    .def("swap", &Values::swap)
    .def("insert", insert_point2)
    .def("insert", insert_rot2)
    .def("insert", insert_pose2)
    .def("insert", insert_point3)
    .def("insert", insert_rot3)
    .def("insert", insert_pose3)
    .def("insert", insert_bias)
    .def("insert", insert_vector3)
    .def("atPoint2", &Values::at<Point2>, return_value_policy<copy_const_reference>())
    .def("atRot2", &Values::at<Rot2>, return_value_policy<copy_const_reference>())
    .def("atPose2", &Values::at<Pose2>, return_value_policy<copy_const_reference>())
    .def("atPoint3", &Values::at<Point3>, return_value_policy<copy_const_reference>())
    .def("atRot3", &Values::at<Rot3>, return_value_policy<copy_const_reference>())
    .def("atPose3", &Values::at<Pose3>, return_value_policy<copy_const_reference>())
    .def("atConstantBias", &Values::at<Bias>, return_value_policy<copy_const_reference>())
    .def("atVector3", &Values::at<Vector3>, return_value_policy<copy_const_reference>())
    .def("exists", exists1)
    .def("keys", &Values::keys)
  ;
}
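
The wrapper maps every insert overload onto the single Python name "insert", while the templated Values::at<T> accessor is exposed under one per-type name because Python cannot supply the template argument. Any additional value type follows the same two-step pattern; a sketch with Cal3_S2 as a purely hypothetical example (it is not wrapped in this change set and would also need its own header include and Python registration):

// Sketch only: extending the pattern above to one more value type.
// Assumes #include "gtsam/geometry/Cal3_S2.h" and a Cal3_S2 registration elsewhere.
void (Values::*insert_cal3_s2)(Key, const gtsam::Cal3_S2&) = &Values::insert;
// ... then, inside the class_<Values> chain:
//   .def("insert", insert_cal3_s2)
//   .def("atCal3_S2", &Values::at<Cal3_S2>, return_value_policy<copy_const_reference>())
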
@ -0,0 +1,17 @@
#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include <gtsam/slam/BearingFactor.h>

using namespace boost::python;
using namespace gtsam;

using namespace std;

template<class VALUE>
void exportBearingFactor(const std::string& name){
  class_<VALUE>(name.c_str(), init<>())
  ;
}
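
This file only defines the helper template; nothing instantiates it yet, so no bearing factor actually reaches Python in this change set. A possible instantiation is sketched below purely for orientation. It assumes the two-argument BearingFactor<POSE, POINT> template of this GTSAM version and would need the Pose2/Point2 headers; the function and class names are hypothetical:

// Sketch only (not part of the original diff): one way the template could be
// instantiated once a concrete bearing factor is exported. Only the default
// constructor would be exposed, matching the template above.
void exportBearingFactors()
{
  exportBearingFactor< BearingFactor<Pose2, Point2> >("BearingFactorPose2Point2");
}
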
@ -0,0 +1,57 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief wraps BetweenFactor for several values to python
 * @author Andrew Melim
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include "gtsam/slam/BetweenFactor.h"
#include "gtsam/geometry/Point2.h"
#include "gtsam/geometry/Rot2.h"
#include "gtsam/geometry/Pose2.h"
#include "gtsam/geometry/Point3.h"
#include "gtsam/geometry/Rot3.h"
#include "gtsam/geometry/Pose3.h"

using namespace boost::python;
using namespace gtsam;

using namespace std;

// template<class T>
// void exportBetweenFactor(const std::string& name){
//   class_<T>(name, init<>())
//     .def(init<Key, Key, T, SharedNoiseModel>())
//   ;
// }

#define BETWEENFACTOR(T) \
  class_< BetweenFactor<T>, bases<NonlinearFactor>, boost::shared_ptr< BetweenFactor<T> > >("BetweenFactor"#T) \
    .def(init<Key,Key,T,noiseModel::Base::shared_ptr>()) \
    .def("measured", &BetweenFactor<T>::measured, return_internal_reference<>()) \
  ;

void exportBetweenFactors()
{
  BETWEENFACTOR(Point2)
  BETWEENFACTOR(Rot2)
  BETWEENFACTOR(Pose2)
  BETWEENFACTOR(Point3)
  BETWEENFACTOR(Rot3)
  BETWEENFACTOR(Pose3)
}
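
The BETWEENFACTOR macro stamps out one registration per value type, with the Python class name built by token pasting. For reference, the textual expansion of a single invocation looks like this (shown for Pose2; this is just what the preprocessor produces from the macro above, not additional code):

// BETWEENFACTOR(Pose2) expands to:
class_< BetweenFactor<Pose2>, bases<NonlinearFactor>, boost::shared_ptr< BetweenFactor<Pose2> > >("BetweenFactorPose2")
  .def(init<Key, Key, Pose2, noiseModel::Base::shared_ptr>())
  .def("measured", &BetweenFactor<Pose2>::measured, return_internal_reference<>())
;
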
@ -0,0 +1,54 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief wraps GenericProjectionFactor for several values to python
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include "gtsam/slam/ProjectionFactor.h"
#include "gtsam/geometry/Pose3.h"
#include "gtsam/geometry/Point3.h"
#include "gtsam/geometry/Cal3_S2.h"

using namespace boost::python;
using namespace gtsam;

using namespace std;

typedef GenericProjectionFactor<Pose3, Point3, Cal3_S2> GenericProjectionFactorCal3_S2;

BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(print_overloads, GenericProjectionFactorCal3_S2::print, 0, 1)
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(equals_overloads, GenericProjectionFactorCal3_S2::equals, 1, 2)
BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(evaluateError_overloads, GenericProjectionFactorCal3_S2::evaluateError, 2, 4)

void exportGenericProjectionFactor()
{

  class_<GenericProjectionFactorCal3_S2, bases<NonlinearFactor> >("GenericProjectionFactorCal3_S2", init<>())
    .def(init<const Point2 &, SharedNoiseModel, Key, Key, const boost::shared_ptr<Cal3_S2> &, optional<Pose3> >())
    .def(init<const Point2 &, SharedNoiseModel, Key, Key, const boost::shared_ptr<Cal3_S2> &, bool, bool, optional<Pose3> >())
    .def("print", &GenericProjectionFactorCal3_S2::print, print_overloads(args("s")))
    .def("equals", &GenericProjectionFactorCal3_S2::equals, equals_overloads(args("q","tol")))
    .def("evaluate_error", &GenericProjectionFactorCal3_S2::evaluateError, evaluateError_overloads())
    .def("measured", &GenericProjectionFactorCal3_S2::measured, return_value_policy<copy_const_reference>())
    // TODO(Ellon): Find the right return policy when returning a 'const shared_ptr<...> &'
    // .def("calibration", &GenericProjectionFactorCal3_S2::calibration, return_value_policy<copy_const_reference>())
    .def("verbose_cheirality", &GenericProjectionFactorCal3_S2::verboseCheirality)
    .def("throw_cheirality", &GenericProjectionFactorCal3_S2::throwCheirality)
  ;

}
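
One way around the calibration() TODO above is to avoid the return-policy question entirely: wrap the accessor behind a small free function that returns the shared_ptr by value, and bind that function as the Python method. A sketch only, assuming Cal3_S2 is registered to Python elsewhere with a shared_ptr holder (which this change set does not show); getCalibration is a hypothetical helper name:

// Sketch only: copy the shared_ptr so no special return policy is needed.
static boost::shared_ptr<Cal3_S2> getCalibration(
    const GenericProjectionFactorCal3_S2& factor) {
  return factor.calibration();
}
// ... then, inside exportGenericProjectionFactor():
//   .def("calibration", &getCalibration)
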
@ -0,0 +1,58 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief wraps PriorFactor for several values to python
 * @author Andrew Melim
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include "gtsam/slam/PriorFactor.h"
#include "gtsam/geometry/Point2.h"
#include "gtsam/geometry/Rot2.h"
#include "gtsam/geometry/Pose2.h"
#include "gtsam/geometry/Point3.h"
#include "gtsam/geometry/Rot3.h"
#include "gtsam/geometry/Pose3.h"

using namespace boost::python;
using namespace gtsam;

using namespace std;

// template< class FACTOR, class VALUE >
// void exportPriorFactor(const std::string& name){
//   class_< FACTOR >(name.c_str(), init<>())
//     .def(init< Key, VALUE&, SharedNoiseModel >())
//   ;
// }

#define PRIORFACTOR(VALUE) \
  class_< PriorFactor<VALUE>, bases<NonlinearFactor>, boost::shared_ptr< PriorFactor<VALUE> > >("PriorFactor"#VALUE) \
    .def(init<Key,VALUE,noiseModel::Base::shared_ptr>()) \
    .def("prior", &PriorFactor<VALUE>::prior, return_internal_reference<>()) \
  ;

void exportPriorFactors()
{
  PRIORFACTOR(Point2)
  PRIORFACTOR(Rot2)
  PRIORFACTOR(Pose2)
  PRIORFACTOR(Point3)
  PRIORFACTOR(Rot3)
  PRIORFACTOR(Pose3)
  PRIORFACTOR(Vector3)
}
@ -0,0 +1,54 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @brief registers converters between numpy arrays and Eigen vectors/matrices
 * @author Ellon Paiva Mendes (LAAS-CNRS)
 **/

#include <boost/python.hpp>

#define NO_IMPORT_ARRAY
#include <numpy_eigen/NumpyEigenConverter.hpp>

#include "gtsam/base/Matrix.h"
#include "gtsam/base/Vector.h"

using namespace boost::python;
using namespace gtsam;

void registerNumpyEigenConversions()
{
  // NOTE: import_array() should be called only in the cpp defining the module
  // import_array();
  NumpyEigenConverter<Vector>::register_converter();
  NumpyEigenConverter<Vector1>::register_converter();
  NumpyEigenConverter<Vector2>::register_converter();
  NumpyEigenConverter<Vector3>::register_converter();
  NumpyEigenConverter<Vector4>::register_converter();
  NumpyEigenConverter<Vector5>::register_converter();
  NumpyEigenConverter<Vector6>::register_converter();
  NumpyEigenConverter<Vector7>::register_converter();
  NumpyEigenConverter<Vector8>::register_converter();
  NumpyEigenConverter<Vector9>::register_converter();
  NumpyEigenConverter<Vector10>::register_converter();

  NumpyEigenConverter<Matrix>::register_converter();
  NumpyEigenConverter<Matrix2>::register_converter();
  NumpyEigenConverter<Matrix3>::register_converter();
  NumpyEigenConverter<Matrix4>::register_converter();
  NumpyEigenConverter<Matrix5>::register_converter();
  NumpyEigenConverter<Matrix6>::register_converter();
  NumpyEigenConverter<Matrix7>::register_converter();
  NumpyEigenConverter<Matrix8>::register_converter();
  NumpyEigenConverter<Matrix9>::register_converter();
}
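
The note above points out that import_array() belongs in the translation unit that defines the Python module; that file is not part of this excerpt. For orientation, a possible shape of such a module file is sketched below. Everything here is an assumption: the module name _libgtsam_python is guessed from the .so referenced by the setup.py further down, and the ordering simply reflects that base classes and converters must be registered before the classes that depend on them. Unlike the other sources, this file would not define NO_IMPORT_ARRAY, so import_array() can be called exactly once.

// Sketch only: a hypothetical module-definition translation unit (Python 2 era
// import_array(); Python 3 needs the variant that returns a value).
#include <boost/python.hpp>
#include <numpy_eigen/NumpyEigenConverter.hpp>

void registerNumpyEigenConversions();
void exportNonlinearFactor();
void exportValues();
void exportNonlinearFactorGraph();
void exportPriorFactors();
void exportBetweenFactors();
void exportGenericProjectionFactor();

BOOST_PYTHON_MODULE(_libgtsam_python)
{
  import_array();                  // initialize the NumPy C API for this module
  registerNumpyEigenConversions(); // numpy <-> Eigen converters first
  exportNonlinearFactor();         // base class before the factors derived from it
  exportValues();
  exportNonlinearFactorGraph();
  exportPriorFactors();
  exportBetweenFactors();
  exportGenericProjectionFactor();
}
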
@ -0,0 +1,15 @@
from distutils.core import setup

setup(name='gtsam',
      version='${GTSAM_VERSION_STRING}',
      description='GTSAM Python wrapper',
      license='BSD',
      author='Frank Dellaert et al.',
      author_email='frank.dellaert@gatech.edu',
      maintainer_email='gtsam@lists.gatech.edu',
      url='https://collab.cc.gatech.edu/borg/gtsam',
      package_dir={'': '${CMAKE_CURRENT_SOURCE_DIR}'},
      packages=['gtsam', 'gtsam_utils', 'gtsam_examples', 'gtsam_tests'],
      # package_data={'gtsam': ['_libgtsam_python.so']},  # location of .so file is relative to package_dir
      data_files=[('${PY_INSTALL_FOLDER}/gtsam/', ['gtsam/_libgtsam_python.so'])],  # location of .so file relative to setup.py
      )