Merged in fix/misc_bugfixes_and_cleanup (pull request #136)

Miscellaneous Bugfixes and Cleanup (tag: release/4.3a0)

commit 0efdf3aad0
@@ -98,7 +98,8 @@ if( NOT cmake_build_type_tolower STREQUAL ""
     AND NOT cmake_build_type_tolower STREQUAL "release"
     AND NOT cmake_build_type_tolower STREQUAL "timing"
     AND NOT cmake_build_type_tolower STREQUAL "profiling"
-    AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo")
+    AND NOT cmake_build_type_tolower STREQUAL "relwithdebinfo"
+    AND NOT cmake_build_type_tolower STREQUAL "minsizerel")
   message(FATAL_ERROR "Unknown build type \"${CMAKE_BUILD_TYPE}\". Allowed values are None, Debug, Release, Timing, Profiling, RelWithDebInfo (case-insensitive).")
 endif()
@@ -131,12 +131,6 @@ else()
   set(GTSAM_EXPORTED_TARGETS "${GTSAM_EXPORTED_TARGETS}" PARENT_SCOPE)
 endif()
 
-# Set dataset paths
-set_property(SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/slam/dataset.cpp"
-    APPEND PROPERTY COMPILE_DEFINITIONS
-    "SOURCE_TREE_DATASET_DIR=\"${PROJECT_SOURCE_DIR}/examples/Data\""
-    "INSTALLED_DATASET_DIR=\"${GTSAM_TOOLBOX_INSTALL_PATH}/gtsam_examples/Data\"")
-
 # Special cases
 if(MSVC)
   set_property(SOURCE
@@ -222,11 +222,7 @@ double norm_2(const Vector& v) {
 
 /* ************************************************************************* */
 Vector reciprocal(const Vector &a) {
-  size_t n = a.size();
-  Vector b(n);
-  for( size_t i = 0; i < n; i++ )
-    b(i) = 1.0/a(i);
-  return b;
+  return a.array().inverse();
 }
 
 /* ************************************************************************* */
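
The rewritten reciprocal() above simply delegates to Eigen's coefficient-wise inverse. A standalone sketch of the equivalence, using plain Eigen types rather than the gtsam Vector typedef (illustrative, not part of this diff):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::VectorXd a(3);
  a << 2.0, 4.0, 8.0;

  // Coefficient-wise inverse: b(i) = 1.0 / a(i), same result as the old loop
  Eigen::VectorXd b = a.array().inverse();

  std::cout << b.transpose() << std::endl;  // 0.5 0.25 0.125
  return 0;
}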
@@ -24,13 +24,6 @@ namespace internal {
 template<class Class, int N>
 struct VectorSpaceImpl {
 
-  /// @name Group
-  /// @{
-  static Class Compose(const Class& v1, const Class& v2) { return v1+v2;}
-  static Class Between(const Class& v1, const Class& v2) { return v2-v1;}
-  static Class Inverse(const Class& m) { return -m;}
-  /// @}
-
   /// @name Manifold
   /// @{
   typedef Eigen::Matrix<double, N, 1> TangentVector;
@@ -68,21 +61,21 @@ struct VectorSpaceImpl {
     return Class(v);
   }
 
-  static Class Compose(const Class& v1, const Class& v2, ChartJacobian H1,
-      ChartJacobian H2) {
+  static Class Compose(const Class& v1, const Class& v2, ChartJacobian H1 = boost::none,
+      ChartJacobian H2 = boost::none) {
     if (H1) *H1 = Jacobian::Identity();
     if (H2) *H2 = Jacobian::Identity();
     return v1 + v2;
   }
 
-  static Class Between(const Class& v1, const Class& v2, ChartJacobian H1,
-      ChartJacobian H2) {
+  static Class Between(const Class& v1, const Class& v2, ChartJacobian H1 = boost::none,
+      ChartJacobian H2 = boost::none) {
     if (H1) *H1 = - Jacobian::Identity();
     if (H2) *H2 = Jacobian::Identity();
     return v2 - v1;
   }
 
-  static Class Inverse(const Class& v, ChartJacobian H) {
+  static Class Inverse(const Class& v, ChartJacobian H = boost::none) {
     if (H) *H = - Jacobian::Identity();
     return -v;
   }
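
The boost::none defaults added above let callers of Compose/Between/Inverse omit the Jacobian arguments entirely. A minimal sketch of the calling pattern with a hypothetical free function in the same style (not GTSAM API, just an illustration of optional-reference defaults):

#include <Eigen/Dense>
#include <boost/optional.hpp>

typedef Eigen::Matrix3d Jacobian;

Eigen::Vector3d Compose(const Eigen::Vector3d& v1, const Eigen::Vector3d& v2,
                        boost::optional<Jacobian&> H1 = boost::none,
                        boost::optional<Jacobian&> H2 = boost::none) {
  if (H1) *H1 = Jacobian::Identity();  // vector-space composition has identity Jacobians
  if (H2) *H2 = Jacobian::Identity();
  return v1 + v2;
}

int main() {
  Eigen::Vector3d v1(1, 2, 3), v2(4, 5, 6);
  Eigen::Vector3d sum = Compose(v1, v2);            // no derivatives requested
  Jacobian H1, H2;
  Eigen::Vector3d sum2 = Compose(v1, v2, H1, H2);   // derivatives filled in
  return 0;
}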
@@ -25,7 +25,7 @@
 #define GTSAM_VERSION_STRING "@GTSAM_VERSION_STRING@"
 
 // Paths to example datasets distributed with GTSAM
-#define GTSAM_SOURCE_TREE_DATASET_DIR "@CMAKE_SOURCE_DIR@/examples/Data"
+#define GTSAM_SOURCE_TREE_DATASET_DIR "@PROJECT_SOURCE_DIR@/examples/Data"
 #define GTSAM_INSTALLED_DATASET_DIR "@GTSAM_TOOLBOX_INSTALL_PATH@/gtsam_examples/Data"
 
 // Whether GTSAM is compiled to use quaternions for Rot3 (otherwise uses rotation matrices)
@@ -21,7 +21,6 @@
 
 #pragma once
 
-#include <gtsam/base/DerivedValue.h>
 #include <gtsam/geometry/Point2.h>
 
 namespace gtsam {
@@ -73,7 +73,7 @@ Vector3 OrientedPlane3::error(const gtsam::OrientedPlane3& plane) const {
 }
 
 /* ************************************************************************* */
-OrientedPlane3 OrientedPlane3::retract(const Vector& v) const {
+OrientedPlane3 OrientedPlane3::retract(const Vector3& v) const {
   // Retract the Unit3
   Vector2 n_v(v(0), v(1));
   Unit3 n_retracted = n_.retract(n_v);
@@ -83,7 +83,7 @@ OrientedPlane3 OrientedPlane3::retract(const Vector& v) const {
 
 /* ************************************************************************* */
 Vector3 OrientedPlane3::localCoordinates(const OrientedPlane3& y) const {
-  Vector n_local = n_.localCoordinates(y.n_);
+  Vector2 n_local = n_.localCoordinates(y.n_);
   double d_local = d_ - y.d_;
   Vector3 e;
   e << n_local(0), n_local(1), -d_local;
@@ -51,7 +51,7 @@ public:
       n_(s), d_(d) {
   }
 
-  OrientedPlane3(Vector vec) :
+  OrientedPlane3(const Vector4& vec) :
       n_(vec(0), vec(1), vec(2)), d_(vec(3)) {
   }
@@ -89,7 +89,7 @@ public:
   }
 
   /// The retract function
-  OrientedPlane3 retract(const Vector& v) const;
+  OrientedPlane3 retract(const Vector3& v) const;
 
   /// The local coordinates function
   Vector3 localCoordinates(const OrientedPlane3& s) const;
@@ -270,7 +270,10 @@ public:
   /// print
   void print(const std::string& s = "PinholePose") const {
     Base::print(s);
-    K_->print(s + ".calibration");
+    if (!K_)
+      std::cout << "s No calibration given" << std::endl;
+    else
+      K_->print(s + ".calibration");
   }
 
   /// @}
@@ -112,7 +112,9 @@ bool Pose3::equals(const Pose3& pose, double tol) const {
 /* ************************************************************************* */
 /** Modified from Murray94book version (which assumes w and v normalized?) */
 Pose3 Pose3::Expmap(const Vector& xi, OptionalJacobian<6, 6> H) {
-  if (H) *H = ExpmapDerivative(xi);
+  if (H) {
+    *H = ExpmapDerivative(xi);
+  }
 
   // get angular velocity omega and translational velocity v from twist xi
   Point3 w(xi(0), xi(1), xi(2)), v(xi(3), xi(4), xi(5));
@@ -254,6 +256,14 @@ Matrix6 Pose3::LogmapDerivative(const Pose3& pose) {
   return J;
 }
 
+/* ************************************************************************* */
+const Point3& Pose3::translation(OptionalJacobian<3, 6> H) const {
+  if (H) {
+    *H << Z_3x3, rotation().matrix();
+  }
+  return t_;
+}
+
 /* ************************************************************************* */
 Matrix4 Pose3::matrix() const {
   const Matrix3 R = R_.matrix();
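
For reference on the new translation Jacobian: perturbing the pose on the right by a twist xi = (omega, v) gives translation(T * Expmap(xi)) = t + R*v to first order, so the 3x6 pushforward with respect to xi is [0_3x3  R], which is exactly the block the new code writes into H.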
@@ -280,8 +290,9 @@ Point3 Pose3::transform_from(const Point3& p, OptionalJacobian<3,6> Dpose,
     Matrix3 DR = R * skewSymmetric(-p.x(), -p.y(), -p.z());
     (*Dpose) << DR, R;
   }
-  if (Dpoint)
+  if (Dpoint) {
     *Dpoint = R_.matrix();
+  }
   return R_ * p + t_;
 }
@@ -299,17 +310,18 @@ Point3 Pose3::transform_to(const Point3& p, OptionalJacobian<3,6> Dpose,
         +wz, 0.0, -wx, 0.0,-1.0, 0.0,
         -wy, +wx, 0.0, 0.0, 0.0,-1.0;
   }
-  if (Dpoint)
+  if (Dpoint) {
     *Dpoint = Rt;
+  }
   return q;
 }
 
 /* ************************************************************************* */
 double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
     OptionalJacobian<1, 3> H2) const {
-  if (!H1 && !H2)
+  if (!H1 && !H2) {
     return transform_to(point).norm();
-  else {
+  } else {
     Matrix36 D1;
     Matrix3 D2;
     Point3 d = transform_to(point, H1 ? &D1 : 0, H2 ? &D2 : 0);
@@ -223,9 +223,7 @@ public:
   }
 
   /// get translation
-  const Point3& translation() const {
-    return t_;
-  }
+  const Point3& translation(OptionalJacobian<3, 6> H = boost::none) const;
 
   /// get x
   double x() const {
@@ -119,7 +119,7 @@ Matrix3 Unit3::skew() const {
 }
 
 /* ************************************************************************* */
-Vector Unit3::error(const Unit3& q, OptionalJacobian<2,2> H) const {
+Vector2 Unit3::error(const Unit3& q, OptionalJacobian<2,2> H) const {
   // 2D error is equal to B'*q, as B is 3x2 matrix and q is 3x1
   Matrix23 Bt = basis().transpose();
   Vector2 xi = Bt * q.p_.vector();
@@ -108,7 +108,7 @@ public:
   }
 
   /// Return unit-norm Vector
-  Vector unitVector(boost::optional<Matrix&> H = boost::none) const {
+  Vector3 unitVector(boost::optional<Matrix&> H = boost::none) const {
     if (H)
       *H = basis();
     return (p_.vector());
@@ -120,7 +120,7 @@ public:
   }
 
   /// Signed, vector-valued error between two directions
-  Vector error(const Unit3& q, OptionalJacobian<2, 2> H = boost::none) const;
+  Vector2 error(const Unit3& q, OptionalJacobian<2, 2> H = boost::none) const;
 
   /// Distance between two directions
   double distance(const Unit3& q, OptionalJacobian<1, 2> H = boost::none) const;
@@ -90,11 +90,11 @@ TEST( CalibratedCamera, project)
 /* ************************************************************************* */
 TEST( CalibratedCamera, Dproject_to_camera1) {
   Point3 pp(155,233,131);
-  Matrix test1;
-  CalibratedCamera::project_to_camera(pp, test1);
-  Matrix test2 = numericalDerivative11<Point2,Point3>(
+  Matrix actual;
+  CalibratedCamera::project_to_camera(pp, actual);
+  Matrix expected_numerical = numericalDerivative11<Point2,Point3>(
       boost::bind(CalibratedCamera::project_to_camera, _1, boost::none), pp);
-  CHECK(assert_equal(test1, test2));
+  CHECK(assert_equal(expected_numerical, actual));
 }
 
 /* ************************************************************************* */
@@ -186,6 +186,17 @@ TEST(Pose3, expmaps_galore_full)
     EXPECT(assert_equal(xi, Pose3::Logmap(actual),1e-6));
 }
 
+/* ************************************************************************* */
+// Check translation and its pushforward
+TEST(Pose3, translation) {
+  Matrix actualH;
+  EXPECT(assert_equal(Point3(3.5, -8.2, 4.2), T.translation(actualH), 1e-8));
+
+  Matrix numericalH = numericalDerivative11<Point3, Pose3>(
+      boost::bind(&Pose3::translation, _1, boost::none), T);
+  EXPECT(assert_equal(numericalH, actualH, 1e-6));
+}
+
 /* ************************************************************************* */
 TEST(Pose3, Adjoint_compose_full)
 {
@@ -23,14 +23,8 @@
 
 namespace gtsam {
 
-/**
- * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312
- * @param projection_matrices Projection matrices (K*P^-1)
- * @param measurements 2D measurements
- * @param rank_tol SVD rank tolerance
- * @return Triangulated Point3
- */
-Point3 triangulateDLT(const std::vector<Matrix34>& projection_matrices,
+Vector4 triangulateHomogeneousDLT(
+    const std::vector<Matrix34>& projection_matrices,
     const std::vector<Point2>& measurements, double rank_tol) {
 
   // number of cameras
@@ -57,7 +51,16 @@ Point3 triangulateDLT(const std::vector<Matrix34>& projection_matrices,
   if (rank < 3)
     throw(TriangulationUnderconstrainedException());
 
-  // Create 3D point from eigenvector
+  return v;
+}
+
+Point3 triangulateDLT(const std::vector<Matrix34>& projection_matrices,
+    const std::vector<Point2>& measurements, double rank_tol) {
+
+  Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
+      rank_tol);
+
+  // Create 3D point from homogeneous coordinates
   return Point3(sub((v / v(3)), 0, 3));
 }
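
Splitting out triangulateHomogeneousDLT makes the raw homogeneous solution reusable; triangulateDLT then only dehomogenises it. A minimal sketch of that last step in plain Eigen (illustrative; the code above uses the gtsam sub() helper instead):

#include <Eigen/Dense>

int main() {
  Eigen::Vector4d v(1.0, 2.0, 4.0, 2.0);   // homogeneous point from the SVD
  Eigen::Vector3d p = v.head<3>() / v(3);  // divide out the scale: (0.5, 1.0, 2.0)
  return 0;
}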
@@ -89,6 +92,5 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
   return result.at<Point3>(landmarkKey);
 }
 
-
 } // \namespace gtsam
 
@@ -18,7 +18,6 @@
 
 #pragma once
 
-
 #include <gtsam/slam/TriangulationFactor.h>
 #include <gtsam/slam/PriorFactor.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
@@ -43,6 +42,17 @@ public:
   }
 };
 
+/**
+ * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312
+ * @param projection_matrices Projection matrices (K*P^-1)
+ * @param measurements 2D measurements
+ * @param rank_tol SVD rank tolerance
+ * @return Triangulated point, in homogeneous coordinates
+ */
+GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
+    const std::vector<Matrix34>& projection_matrices,
+    const std::vector<Point2>& measurements, double rank_tol = 1e-9);
+
 /**
  * DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312
  * @param projection_matrices Projection matrices (K*P^-1)
@@ -52,7 +62,7 @@ public:
 */
 GTSAM_EXPORT Point3 triangulateDLT(
     const std::vector<Matrix34>& projection_matrices,
-    const std::vector<Point2>& measurements, double rank_tol);
+    const std::vector<Point2>& measurements, double rank_tol = 1e-9);
 
 ///
 /**
@@ -164,6 +174,25 @@ Point3 triangulateNonlinear(
   return optimize(graph, values, Symbol('p', 0));
 }
 
+/**
+ * Create a 3*4 camera projection matrix from calibration and pose.
+ * Functor for partial application on calibration
+ * @param pose The camera pose
+ * @param cal The calibration
+ * @return Returns a Matrix34
+ */
+template<class CALIBRATION>
+struct CameraProjectionMatrix {
+  CameraProjectionMatrix(const CALIBRATION& calibration) :
+      K_(calibration.K()) {
+  }
+  Matrix34 operator()(const Pose3& pose) const {
+    return K_ * (pose.inverse().matrix()).block<3, 4>(0, 0);
+  }
+private:
+  const Matrix3 K_;
+};
+
 /**
  * Function to triangulate 3D landmark point from an arbitrary number
  * of poses (at least 2) using the DLT. The function checks that the
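
The new CameraProjectionMatrix functor bakes the calibration in once and is then applied per pose, which is how the two triangulatePoint3 overloads below use it. A hedged usage sketch (Cal3_S2 and the helper function are illustrative choices, not part of this diff):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Pose3.h>
#include <boost/foreach.hpp>
#include <vector>
// plus the header that declares CameraProjectionMatrix (path assumed)

void buildProjectionMatrices(const std::vector<gtsam::Pose3>& poses) {
  gtsam::Cal3_S2 K(500, 500, 0, 320, 240);
  gtsam::CameraProjectionMatrix<gtsam::Cal3_S2> createP(K);  // partially apply calibration

  std::vector<gtsam::Matrix34> Ps;
  BOOST_FOREACH(const gtsam::Pose3& pose, poses)
    Ps.push_back(createP(pose));  // each entry is K * [R' | -R'*t]
}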
@@ -188,10 +217,10 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
 
   // construct projection matrices from poses & calibration
   std::vector<Matrix34> projection_matrices;
-  BOOST_FOREACH(const Pose3& pose, poses) {
-    projection_matrices.push_back(
-        sharedCal->K() * (pose.inverse().matrix()).block<3,4>(0,0));
-  }
+  CameraProjectionMatrix<CALIBRATION> createP(*sharedCal); // partially apply
+  BOOST_FOREACH(const Pose3& pose, poses)
+    projection_matrices.push_back(createP(pose));
+
   // Triangulate linearly
   Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
@@ -204,7 +233,7 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
   BOOST_FOREACH(const Pose3& pose, poses) {
     const Point3& p_local = pose.transform_to(point);
     if (p_local.z() <= 0)
       throw(TriangulationCheiralityException());
   }
 #endif
 
@@ -230,7 +259,7 @@ Point3 triangulatePoint3(
     bool optimize = false) {
 
   size_t m = cameras.size();
-  assert(measurements.size()==m);
+  assert(measurements.size() == m);
 
   if (m < 2)
     throw(TriangulationUnderconstrainedException());
@@ -238,10 +267,10 @@ Point3 triangulatePoint3(
   // construct projection matrices from poses & calibration
   typedef PinholeCamera<CALIBRATION> Camera;
   std::vector<Matrix34> projection_matrices;
-  BOOST_FOREACH(const Camera& camera, cameras) {
-    Matrix P_ = (camera.pose().inverse().matrix());
-    projection_matrices.push_back(camera.calibration().K()* (P_.block<3,4>(0,0)) );
-  }
+  BOOST_FOREACH(const Camera& camera, cameras)
+    projection_matrices.push_back(
+        CameraProjectionMatrix<CALIBRATION>(camera.calibration())(
+            camera.pose()));
   Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
 
   // Then refine using non-linear optimization
@@ -253,7 +282,7 @@ Point3 triangulatePoint3(
   BOOST_FOREACH(const Camera& camera, cameras) {
     const Point3& p_local = camera.pose().transform_to(point);
     if (p_local.z() <= 0)
       throw(TriangulationCheiralityException());
   }
 #endif
 
@@ -365,32 +365,50 @@ void JacobianFactor::print(const string& s,
 /* ************************************************************************* */
 // Check if two linear factors are equal
 bool JacobianFactor::equals(const GaussianFactor& f_, double tol) const {
-  if (!dynamic_cast<const JacobianFactor*>(&f_))
+  static const bool verbose = false;
+  if (!dynamic_cast<const JacobianFactor*>(&f_)) {
+    if (verbose)
+      cout << "JacobianFactor::equals: Incorrect type" << endl;
     return false;
-  else {
+  } else {
     const JacobianFactor& f(static_cast<const JacobianFactor&>(f_));
 
     // Check keys
-    if (keys() != f.keys())
+    if (keys() != f.keys()) {
+      if (verbose)
+        cout << "JacobianFactor::equals: keys do not match" << endl;
       return false;
+    }
 
     // Check noise model
-    if ((model_ && !f.model_) || (!model_ && f.model_))
+    if ((model_ && !f.model_) || (!model_ && f.model_)) {
+      if (verbose)
+        cout << "JacobianFactor::equals: noise model mismatch" << endl;
       return false;
-    if (model_ && f.model_ && !model_->equals(*f.model_, tol))
+    }
+    if (model_ && f.model_ && !model_->equals(*f.model_, tol)) {
+      if (verbose)
+        cout << "JacobianFactor::equals: noise modesl are not equal" << endl;
       return false;
+    }
 
     // Check matrix sizes
-    if (!(Ab_.rows() == f.Ab_.rows() && Ab_.cols() == f.Ab_.cols()))
+    if (!(Ab_.rows() == f.Ab_.rows() && Ab_.cols() == f.Ab_.cols())) {
+      if (verbose)
+        cout << "JacobianFactor::equals: augmented size mismatch" << endl;
      return false;
+    }
 
     // Check matrix contents
     constABlock Ab1(Ab_.range(0, Ab_.nBlocks()));
     constABlock Ab2(f.Ab_.range(0, f.Ab_.nBlocks()));
     for (size_t row = 0; row < (size_t) Ab1.rows(); ++row)
       if (!equal_with_abs_tol(Ab1.row(row), Ab2.row(row), tol)
-          && !equal_with_abs_tol(-Ab1.row(row), Ab2.row(row), tol))
+          && !equal_with_abs_tol(-Ab1.row(row), Ab2.row(row), tol)) {
+        if (verbose)
+          cout << "JacobianFactor::equals: matrix mismatch at row " << row << endl;
         return false;
+      }
 
     return true;
   }
@@ -28,6 +28,7 @@
 #include <iostream>
 #include <typeinfo>
 #include <stdexcept>
+#include <cmath>
 
 using namespace std;
 
@@ -70,6 +71,11 @@ boost::optional<Vector> checkIfDiagonal(const Matrix M) {
   }
 }
 
+/* ************************************************************************* */
+Vector Base::sigmas() const {
+  throw("Base::sigmas: sigmas() not implemented for this noise model");
+}
+
 /* ************************************************************************* */
 Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
   size_t m = R.rows(), n = R.cols();
@@ -79,24 +85,25 @@ Gaussian::shared_ptr Gaussian::SqrtInformation(const Matrix& R, bool smart) {
   if (smart)
     diagonal = checkIfDiagonal(R);
   if (diagonal)
-    return Diagonal::Sigmas(reciprocal(*diagonal), true);
+    return Diagonal::Sigmas(diagonal->array().inverse(), true);
   else
     return shared_ptr(new Gaussian(R.rows(), R));
 }
 
 /* ************************************************************************* */
-Gaussian::shared_ptr Gaussian::Information(const Matrix& M, bool smart) {
-  size_t m = M.rows(), n = M.cols();
+Gaussian::shared_ptr Gaussian::Information(const Matrix& information, bool smart) {
+  size_t m = information.rows(), n = information.cols();
   if (m != n)
     throw invalid_argument("Gaussian::Information: R not square");
   boost::optional<Vector> diagonal = boost::none;
   if (smart)
-    diagonal = checkIfDiagonal(M);
+    diagonal = checkIfDiagonal(information);
   if (diagonal)
     return Diagonal::Precisions(*diagonal, true);
   else {
-    Matrix R = RtR(M);
-    return shared_ptr(new Gaussian(R.rows(), R));
+    Eigen::LLT<Matrix> llt(information);
+    Matrix R = llt.matrixU();
+    return shared_ptr(new Gaussian(n, R));
   }
 }
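
Gaussian::Information now takes the upper-triangular Cholesky factor of the information matrix directly, so R satisfies R'R = information without forming RtR(M). A standalone sketch of the Eigen call involved (assumes the matrix is symmetric positive definite):

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd info(2, 2);
  info << 4.0, 2.0,
          2.0, 3.0;

  Eigen::LLT<Eigen::MatrixXd> llt(info);
  Eigen::MatrixXd R = llt.matrixU();  // upper-triangular, R^T * R == info

  std::cout << (R.transpose() * R - info).norm() << std::endl;  // ~0
  return 0;
}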
@@ -111,13 +118,15 @@ Gaussian::shared_ptr Gaussian::Covariance(const Matrix& covariance,
     variances = checkIfDiagonal(covariance);
   if (variances)
     return Diagonal::Variances(*variances, true);
-  else
-    return shared_ptr(new Gaussian(n, inverse_square_root(covariance)));
+  else {
+    // TODO: can we do this more efficiently and still get an upper triangular nmatrix??
+    return Information(covariance.inverse(), false);
+  }
 }
 
 /* ************************************************************************* */
 void Gaussian::print(const string& name) const {
-  gtsam::print(thisR(), "Gaussian");
+  gtsam::print(thisR(), name + "Gaussian");
 }
 
 /* ************************************************************************* */
@@ -129,6 +138,12 @@ bool Gaussian::equals(const Base& expected, double tol) const {
   return equal_with_abs_tol(R(), p->R(), sqrt(tol));
 }
 
+/* ************************************************************************* */
+Vector Gaussian::sigmas() const {
+  // TODO(frank): can this be done faster?
+  return Vector((thisR().transpose() * thisR()).inverse().diagonal()).cwiseSqrt();
+}
+
 /* ************************************************************************* */
 Vector Gaussian::whiten(const Vector& v) const {
   return thisR() * v;
@@ -221,9 +236,11 @@ Diagonal::Diagonal() :
 }
 
 /* ************************************************************************* */
-Diagonal::Diagonal(const Vector& sigmas) :
-    Gaussian(sigmas.size()), sigmas_(sigmas), invsigmas_(reciprocal(sigmas)), precisions_(
-        emul(invsigmas_, invsigmas_)) {
+Diagonal::Diagonal(const Vector& sigmas)
+    : Gaussian(sigmas.size()),
+      sigmas_(sigmas),
+      invsigmas_(sigmas.array().inverse()),
+      precisions_(invsigmas_.array().square()) {
 }
 
 /* ************************************************************************* */
@@ -262,12 +279,12 @@ void Diagonal::print(const string& name) const {
 
 /* ************************************************************************* */
 Vector Diagonal::whiten(const Vector& v) const {
-  return emul(v, invsigmas());
+  return v.cwiseProduct(invsigmas_);
 }
 
 /* ************************************************************************* */
 Vector Diagonal::unwhiten(const Vector& v) const {
-  return emul(v, sigmas_);
+  return v.cwiseProduct(sigmas_);
 }
 
 /* ************************************************************************* */
@@ -342,7 +359,7 @@ Vector Constrained::whiten(const Vector& v) const {
   assert (b.size()==a.size());
   Vector c(n);
   for( size_t i = 0; i < n; i++ ) {
-    const double& ai = a(i), &bi = b(i);
+    const double& ai = a(i), bi = b(i);
     c(i) = (bi==0.0) ? ai : ai/bi; // NOTE: not ediv_()
   }
   return c;
@@ -404,8 +421,8 @@ SharedDiagonal Constrained::QR(Matrix& Ab) const {
   list<Triple> Rd;
 
   Vector pseudo(m); // allocate storage for pseudo-inverse
-  Vector invsigmas = reciprocal(sigmas_);
-  Vector weights = emul(invsigmas,invsigmas); // calculate weights once
+  Vector invsigmas = sigmas_.array().inverse();
+  Vector weights = invsigmas.array().square(); // calculate weights once
 
   // We loop over all columns, because the columns that can be eliminated
   // are not necessarily contiguous. For each one, estimate the corresponding
@@ -542,16 +559,6 @@ Vector Base::weight(const Vector &error) const {
   return w;
 }
 
-/** square root version of the weight function */
-Vector Base::sqrtWeight(const Vector &error) const {
-  const size_t n = error.rows();
-  Vector w(n);
-  for ( size_t i = 0 ; i < n ; ++i )
-    w(i) = sqrtWeight(error(i));
-  return w;
-}
-
-
 /** The following three functions reweight block matrices and a vector
  * according to their weight implementation */
@@ -560,8 +567,7 @@ void Base::reweight(Vector& error) const {
     const double w = sqrtWeight(error.norm());
     error *= w;
   } else {
-    const Vector w = sqrtWeight(error);
-    error.array() *= w.array();
+    error.array() *= weight(error).cwiseSqrt().array();
   }
 }
@@ -579,7 +585,7 @@ void Base::reweight(vector<Matrix> &A, Vector &error) const {
     BOOST_FOREACH(Matrix& Aj, A) {
       vector_scale_inplace(W,Aj);
     }
-    error = emul(W, error);
+    error = W.cwiseProduct(error);
   }
 }
@@ -593,7 +599,7 @@ void Base::reweight(Matrix &A, Vector &error) const {
   else {
     const Vector W = sqrtWeight(error);
     vector_scale_inplace(W,A);
-    error = emul(W, error);
+    error = W.cwiseProduct(error);
   }
 }
@@ -609,7 +615,7 @@ void Base::reweight(Matrix &A1, Matrix &A2, Vector &error) const {
     const Vector W = sqrtWeight(error);
     vector_scale_inplace(W,A1);
     vector_scale_inplace(W,A2);
-    error = emul(W, error);
+    error = W.cwiseProduct(error);
   }
 }
@@ -627,7 +633,7 @@ void Base::reweight(Matrix &A1, Matrix &A2, Matrix &A3, Vector &error) const {
     vector_scale_inplace(W,A1);
     vector_scale_inplace(W,A2);
     vector_scale_inplace(W,A3);
-    error = emul(W, error);
+    error = W.cwiseProduct(error);
  }
 }
@@ -641,7 +647,7 @@ void Null::print(const std::string &s="") const
 Null::shared_ptr Null::Create()
 { return shared_ptr(new Null()); }
 
-Fair::Fair(const double c, const ReweightScheme reweight)
+Fair::Fair(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
   if ( c_ <= 0 ) {
     cout << "mEstimator Fair takes only positive double in constructor. forced to 1.0" << endl;
@@ -653,26 +659,26 @@ Fair::Fair(const double c, const ReweightScheme reweight)
 // Fair
 /* ************************************************************************* */
 
-double Fair::weight(const double &error) const
+double Fair::weight(double error) const
 { return 1.0 / (1.0 + fabs(error)/c_); }
 
 void Fair::print(const std::string &s="") const
 { cout << s << "fair (" << c_ << ")" << endl; }
 
-bool Fair::equals(const Base &expected, const double tol) const {
+bool Fair::equals(const Base &expected, double tol) const {
   const Fair* p = dynamic_cast<const Fair*> (&expected);
   if (p == NULL) return false;
   return fabs(c_ - p->c_ ) < tol;
 }
 
-Fair::shared_ptr Fair::Create(const double c, const ReweightScheme reweight)
+Fair::shared_ptr Fair::Create(double c, const ReweightScheme reweight)
 { return shared_ptr(new Fair(c, reweight)); }
 
 /* ************************************************************************* */
 // Huber
 /* ************************************************************************* */
 
-Huber::Huber(const double k, const ReweightScheme reweight)
+Huber::Huber(double k, const ReweightScheme reweight)
   : Base(reweight), k_(k) {
   if ( k_ <= 0 ) {
     cout << "mEstimator Huber takes only positive double in constructor. forced to 1.0" << endl;
@@ -680,7 +686,7 @@ Huber::Huber(const double k, const ReweightScheme reweight)
   }
 }
 
-double Huber::weight(const double &error) const {
+double Huber::weight(double error) const {
   return (error < k_) ? (1.0) : (k_ / fabs(error));
 }
@@ -688,13 +694,13 @@ void Huber::print(const std::string &s="") const {
   cout << s << "huber (" << k_ << ")" << endl;
 }
 
-bool Huber::equals(const Base &expected, const double tol) const {
+bool Huber::equals(const Base &expected, double tol) const {
   const Huber* p = dynamic_cast<const Huber*>(&expected);
   if (p == NULL) return false;
   return fabs(k_ - p->k_) < tol;
 }
 
-Huber::shared_ptr Huber::Create(const double c, const ReweightScheme reweight) {
+Huber::shared_ptr Huber::Create(double c, const ReweightScheme reweight) {
   return shared_ptr(new Huber(c, reweight));
 }
@@ -702,7 +708,7 @@ Huber::shared_ptr Huber::Create(const double c, const ReweightScheme reweight) {
 // Cauchy
 /* ************************************************************************* */
 
-Cauchy::Cauchy(const double k, const ReweightScheme reweight)
+Cauchy::Cauchy(double k, const ReweightScheme reweight)
   : Base(reweight), k_(k) {
   if ( k_ <= 0 ) {
     cout << "mEstimator Cauchy takes only positive double in constructor. forced to 1.0" << endl;
@@ -710,7 +716,7 @@ Cauchy::Cauchy(const double k, const ReweightScheme reweight)
   }
 }
 
-double Cauchy::weight(const double &error) const {
+double Cauchy::weight(double error) const {
   return k_*k_ / (k_*k_ + error*error);
 }
@@ -718,24 +724,24 @@ void Cauchy::print(const std::string &s="") const {
   cout << s << "cauchy (" << k_ << ")" << endl;
 }
 
-bool Cauchy::equals(const Base &expected, const double tol) const {
+bool Cauchy::equals(const Base &expected, double tol) const {
   const Cauchy* p = dynamic_cast<const Cauchy*>(&expected);
   if (p == NULL) return false;
   return fabs(k_ - p->k_) < tol;
 }
 
-Cauchy::shared_ptr Cauchy::Create(const double c, const ReweightScheme reweight) {
+Cauchy::shared_ptr Cauchy::Create(double c, const ReweightScheme reweight) {
   return shared_ptr(new Cauchy(c, reweight));
 }
 
 /* ************************************************************************* */
 // Tukey
 /* ************************************************************************* */
-Tukey::Tukey(const double c, const ReweightScheme reweight)
+Tukey::Tukey(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }
 
-double Tukey::weight(const double &error) const {
+double Tukey::weight(double error) const {
   if (fabs(error) <= c_) {
     double xc2 = (error/c_)*(error/c_);
     double one_xc22 = (1.0-xc2)*(1.0-xc2);
@@ -748,24 +754,24 @@ void Tukey::print(const std::string &s="") const {
   std::cout << s << ": Tukey (" << c_ << ")" << std::endl;
 }
 
-bool Tukey::equals(const Base &expected, const double tol) const {
+bool Tukey::equals(const Base &expected, double tol) const {
   const Tukey* p = dynamic_cast<const Tukey*>(&expected);
   if (p == NULL) return false;
   return fabs(c_ - p->c_) < tol;
 }
 
-Tukey::shared_ptr Tukey::Create(const double c, const ReweightScheme reweight) {
+Tukey::shared_ptr Tukey::Create(double c, const ReweightScheme reweight) {
   return shared_ptr(new Tukey(c, reweight));
 }
 
 /* ************************************************************************* */
 // Welsh
 /* ************************************************************************* */
-Welsh::Welsh(const double c, const ReweightScheme reweight)
+Welsh::Welsh(double c, const ReweightScheme reweight)
   : Base(reweight), c_(c) {
 }
 
-double Welsh::weight(const double &error) const {
+double Welsh::weight(double error) const {
   double xc2 = (error/c_)*(error/c_);
   return std::exp(-xc2);
 }
@@ -774,13 +780,13 @@ void Welsh::print(const std::string &s="") const {
   std::cout << s << ": Welsh (" << c_ << ")" << std::endl;
 }
 
-bool Welsh::equals(const Base &expected, const double tol) const {
+bool Welsh::equals(const Base &expected, double tol) const {
   const Welsh* p = dynamic_cast<const Welsh*>(&expected);
   if (p == NULL) return false;
   return fabs(c_ - p->c_) < tol;
 }
 
-Welsh::shared_ptr Welsh::Create(const double c, const ReweightScheme reweight) {
+Welsh::shared_ptr Welsh::Create(double c, const ReweightScheme reweight) {
   return shared_ptr(new Welsh(c, reweight));
 }
@@ -26,7 +26,6 @@
 #include <boost/serialization/singleton.hpp>
 #include <boost/serialization/shared_ptr.hpp>
 #include <boost/serialization/optional.hpp>
-#include <cmath>
 
 namespace gtsam {
 
@@ -59,7 +58,7 @@ namespace gtsam {
 
   public:
 
-    /** primary constructor @param dim is the dimension of the model */
+    /// primary constructor @param dim is the dimension of the model
     Base(size_t dim = 1):dim_(dim) {}
     virtual ~Base() {}
 
@@ -75,14 +74,13 @@ namespace gtsam {
 
     virtual bool equals(const Base& expected, double tol=1e-9) const = 0;
 
-    /**
-     * Whiten an error vector.
-     */
+    /// Calculate standard deviations
+    virtual Vector sigmas() const;
+
+    /// Whiten an error vector.
     virtual Vector whiten(const Vector& v) const = 0;
 
-    /**
-     * Unwhiten an error vector.
-     */
+    /// Unwhiten an error vector.
     virtual Vector unwhiten(const Vector& v) const = 0;
 
     virtual double distance(const Vector& v) const = 0;
@@ -189,6 +187,7 @@ namespace gtsam {
 
     virtual void print(const std::string& name) const;
     virtual bool equals(const Base& expected, double tol=1e-9) const;
+    virtual Vector sigmas() const;
     virtual Vector whiten(const Vector& v) const;
     virtual Vector unwhiten(const Vector& v) const;
 
@@ -220,9 +219,9 @@ namespace gtsam {
     /**
     * Whiten a system, in place as well
     */
-    virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const ;
-    virtual void WhitenSystem(Matrix& A, Vector& b) const ;
-    virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const ;
+    virtual void WhitenSystem(std::vector<Matrix>& A, Vector& b) const;
+    virtual void WhitenSystem(Matrix& A, Vector& b) const;
+    virtual void WhitenSystem(Matrix& A1, Matrix& A2, Vector& b) const;
     virtual void WhitenSystem(Matrix& A1, Matrix& A2, Matrix& A3, Vector& b) const;
 
     /**
@@ -234,11 +233,15 @@ namespace gtsam {
     */
     virtual boost::shared_ptr<Diagonal> QR(Matrix& Ab) const;
 
-    /**
-     * Return R itself, but note that Whiten(H) is cheaper than R*H
-     */
+    /// Return R itself, but note that Whiten(H) is cheaper than R*H
     virtual Matrix R() const { return thisR();}
 
+    /// Compute information matrix
+    virtual Matrix information() const { return thisR().transpose() * thisR(); }
+
+    /// Compute covariance matrix
+    virtual Matrix covariance() const { return information().inverse(); }
+
   private:
     /** Serialization function */
     friend class boost::serialization::access;
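
The new information() and covariance() accessors expose Lambda = R'R and Sigma = Lambda^-1 from the stored square root. A hedged usage sketch against a diagonal model (values chosen for illustration only):

#include <gtsam/linear/NoiseModel.h>

void noiseModelAccessors() {
  gtsam::noiseModel::Diagonal::shared_ptr model =
      gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector2(2.0, 3.0));

  gtsam::Matrix cov  = model->covariance();   // diag(4, 9)
  gtsam::Matrix info = model->information();  // diag(1/4, 1/9)
  gtsam::Vector sig  = model->sigmas();       // (2, 3), via the new virtual
}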
@@ -303,6 +306,7 @@ namespace gtsam {
     }
 
     virtual void print(const std::string& name) const;
+    virtual Vector sigmas() const { return sigmas_; }
     virtual Vector whiten(const Vector& v) const;
     virtual Vector unwhiten(const Vector& v) const;
     virtual Matrix Whiten(const Matrix& H) const;
@@ -312,7 +316,6 @@ namespace gtsam {
     /**
     * Return standard deviations (sqrt of diagonal)
     */
-    inline const Vector& sigmas() const { return sigmas_; }
     inline double sigma(size_t i) const { return sigmas_(i); }
 
     /**
|
@ -629,20 +632,23 @@ namespace gtsam {
|
||||||
virtual ~Base() {}
|
virtual ~Base() {}
|
||||||
|
|
||||||
/// robust error function to implement
|
/// robust error function to implement
|
||||||
virtual double weight(const double &error) const = 0;
|
virtual double weight(double error) const = 0;
|
||||||
|
|
||||||
virtual void print(const std::string &s) const = 0;
|
virtual void print(const std::string &s) const = 0;
|
||||||
virtual bool equals(const Base& expected, const double tol=1e-8) const = 0;
|
virtual bool equals(const Base& expected, double tol=1e-8) const = 0;
|
||||||
|
|
||||||
inline double sqrtWeight(const double &error) const
|
double sqrtWeight(double error) const {
|
||||||
{ return std::sqrt(weight(error)); }
|
return std::sqrt(weight(error));
|
||||||
|
}
|
||||||
|
|
||||||
/** produce a weight vector according to an error vector and the implemented
|
/** produce a weight vector according to an error vector and the implemented
|
||||||
* robust function */
|
* robust function */
|
||||||
Vector weight(const Vector &error) const;
|
Vector weight(const Vector &error) const;
|
||||||
|
|
||||||
/** square root version of the weight function */
|
/** square root version of the weight function */
|
||||||
Vector sqrtWeight(const Vector &error) const;
|
Vector sqrtWeight(const Vector &error) const {
|
||||||
|
return weight(error).cwiseSqrt();
|
||||||
|
}
|
||||||
|
|
||||||
/** reweight block matrices and a vector according to their weight implementation */
|
/** reweight block matrices and a vector according to their weight implementation */
|
||||||
void reweight(Vector &error) const;
|
void reweight(Vector &error) const;
|
||||||
|
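
With the vector overload of sqrtWeight now defined inline as weight(error).cwiseSqrt(), the scalar and vector paths share one definition of the robust weight. A hedged sketch of the per-element behaviour using the Huber estimator (threshold 1.345 as in the default):

#include <gtsam/linear/NoiseModel.h>

void huberWeights() {
  gtsam::noiseModel::mEstimator::Huber::shared_ptr huber =
      gtsam::noiseModel::mEstimator::Huber::Create(1.345);

  double w_small = huber->weight(0.5);       // 1.0 inside the band
  double w_large = huber->weight(10.0);      // k/|e| = 0.1345 outside
  double sw      = huber->sqrtWeight(10.0);  // sqrt(0.1345)
}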
@@ -667,9 +673,9 @@ namespace gtsam {
 
       Null(const ReweightScheme reweight = Block) : Base(reweight) {}
       virtual ~Null() {}
-      virtual double weight(const double& /*error*/) const { return 1.0; }
+      virtual double weight(double /*error*/) const { return 1.0; }
       virtual void print(const std::string &s) const;
-      virtual bool equals(const Base& /*expected*/, const double /*tol*/) const { return true; }
+      virtual bool equals(const Base& /*expected*/, double /*tol*/) const { return true; }
       static shared_ptr Create() ;
 
     private:
@@ -686,12 +692,12 @@ namespace gtsam {
     public:
       typedef boost::shared_ptr<Fair> shared_ptr;
 
-      Fair(const double c = 1.3998, const ReweightScheme reweight = Block);
+      Fair(double c = 1.3998, const ReweightScheme reweight = Block);
       virtual ~Fair() {}
-      virtual double weight(const double &error) const ;
-      virtual void print(const std::string &s) const ;
-      virtual bool equals(const Base& expected, const double tol=1e-8) const ;
-      static shared_ptr Create(const double c, const ReweightScheme reweight = Block) ;
+      virtual double weight(double error) const;
+      virtual void print(const std::string &s) const;
+      virtual bool equals(const Base& expected, double tol=1e-8) const;
+      static shared_ptr Create(double c, const ReweightScheme reweight = Block) ;
 
     protected:
       double c_;
@@ -712,11 +718,11 @@ namespace gtsam {
       typedef boost::shared_ptr<Huber> shared_ptr;
 
       virtual ~Huber() {}
-      Huber(const double k = 1.345, const ReweightScheme reweight = Block);
-      virtual double weight(const double &error) const ;
-      virtual void print(const std::string &s) const ;
-      virtual bool equals(const Base& expected, const double tol=1e-8) const ;
-      static shared_ptr Create(const double k, const ReweightScheme reweight = Block) ;
+      Huber(double k = 1.345, const ReweightScheme reweight = Block);
+      virtual double weight(double error) const;
+      virtual void print(const std::string &s) const;
+      virtual bool equals(const Base& expected, double tol=1e-8) const;
+      static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
 
     protected:
       double k_;
@ -741,11 +747,11 @@ namespace gtsam {
|
||||||
typedef boost::shared_ptr<Cauchy> shared_ptr;
|
typedef boost::shared_ptr<Cauchy> shared_ptr;
|
||||||
|
|
||||||
virtual ~Cauchy() {}
|
virtual ~Cauchy() {}
|
||||||
Cauchy(const double k = 0.1, const ReweightScheme reweight = Block);
|
Cauchy(double k = 0.1, const ReweightScheme reweight = Block);
|
||||||
virtual double weight(const double &error) const ;
|
virtual double weight(double error) const;
|
||||||
virtual void print(const std::string &s) const ;
|
virtual void print(const std::string &s) const;
|
||||||
virtual bool equals(const Base& expected, const double tol=1e-8) const ;
|
virtual bool equals(const Base& expected, double tol=1e-8) const;
|
||||||
static shared_ptr Create(const double k, const ReweightScheme reweight = Block) ;
|
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
double k_;
|
double k_;
|
||||||
|
@ -765,12 +771,12 @@ namespace gtsam {
|
||||||
public:
|
public:
|
||||||
typedef boost::shared_ptr<Tukey> shared_ptr;
|
typedef boost::shared_ptr<Tukey> shared_ptr;
|
||||||
|
|
||||||
Tukey(const double c = 4.6851, const ReweightScheme reweight = Block);
|
Tukey(double c = 4.6851, const ReweightScheme reweight = Block);
|
||||||
virtual ~Tukey() {}
|
virtual ~Tukey() {}
|
||||||
virtual double weight(const double &error) const ;
|
virtual double weight(double error) const;
|
||||||
virtual void print(const std::string &s) const ;
|
virtual void print(const std::string &s) const;
|
||||||
virtual bool equals(const Base& expected, const double tol=1e-8) const ;
|
virtual bool equals(const Base& expected, double tol=1e-8) const;
|
||||||
static shared_ptr Create(const double k, const ReweightScheme reweight = Block) ;
|
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
double c_;
|
double c_;
|
||||||
|
@ -790,12 +796,12 @@ namespace gtsam {
|
||||||
public:
|
public:
|
||||||
typedef boost::shared_ptr<Welsh> shared_ptr;
|
typedef boost::shared_ptr<Welsh> shared_ptr;
|
||||||
|
|
||||||
Welsh(const double c = 2.9846, const ReweightScheme reweight = Block);
|
Welsh(double c = 2.9846, const ReweightScheme reweight = Block);
|
||||||
virtual ~Welsh() {}
|
virtual ~Welsh() {}
|
||||||
virtual double weight(const double &error) const ;
|
virtual double weight(double error) const;
|
||||||
virtual void print(const std::string &s) const ;
|
virtual void print(const std::string &s) const;
|
||||||
virtual bool equals(const Base& expected, const double tol=1e-8) const ;
|
virtual bool equals(const Base& expected, double tol=1e-8) const;
|
||||||
static shared_ptr Create(const double k, const ReweightScheme reweight = Block) ;
|
static shared_ptr Create(double k, const ReweightScheme reweight = Block) ;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
double c_;
|
double c_;
|
||||||
|
|
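For context on the estimators whose signatures change above, the textbook weight functions are sketched below. These are the standard forms only, with defaults mirroring the constructor defaults; the actual GTSAM implementations live in NoiseModel.cpp and are not reproduced here.

#include <cmath>

// Standard m-estimator weights (illustrative sketch, not the GTSAM code).
double huberWeight(double error, double k = 1.345) {
  const double a = std::fabs(error);
  return a <= k ? 1.0 : k / a;          // quadratic core, linear tails
}

double cauchyWeight(double error, double k = 0.1) {
  return (k * k) / (k * k + error * error);  // heavy-tailed, never zero
}

double tukeyWeight(double error, double c = 4.6851) {
  if (std::fabs(error) > c) return 0.0;      // hard rejection beyond c
  const double t = 1.0 - (error / c) * (error / c);
  return t * t;
}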
@@ -70,7 +70,7 @@
     Subgraph(const std::vector<size_t> &indices) ;

     inline const Edges& edges() const { return edges_; }
-    inline const size_t size() const { return edges_.size(); }
+    inline size_t size() const { return edges_.size(); }
     EdgeIndices edgeIndices() const;

     iterator begin() { return edges_.begin(); }
@@ -33,15 +33,16 @@ using namespace gtsam;
 using namespace noiseModel;
 using namespace boost::assign;

-static double sigma = 2, s_1=1.0/sigma, var = sigma*sigma, prc = 1.0/var;
-static Matrix R = (Matrix(3, 3) <<
+static const double kSigma = 2, s_1=1.0/kSigma, kVariance = kSigma*kSigma, prc = 1.0/kVariance;
+static const Matrix R = (Matrix(3, 3) <<
     s_1, 0.0, 0.0,
     0.0, s_1, 0.0,
     0.0, 0.0, s_1).finished();
-static Matrix Sigma = (Matrix(3, 3) <<
-    var, 0.0, 0.0,
-    0.0, var, 0.0,
-    0.0, 0.0, var).finished();
+static const Matrix kCovariance = (Matrix(3, 3) <<
+    kVariance, 0.0, 0.0,
+    0.0, kVariance, 0.0,
+    0.0, 0.0, kVariance).finished();
+static const Vector3 kSigmas(kSigma, kSigma, kSigma);

 //static double inf = numeric_limits<double>::infinity();

@@ -53,15 +54,19 @@ TEST(NoiseModel, constructors)
   // Construct noise models
   vector<Gaussian::shared_ptr> m;
-  m.push_back(Gaussian::SqrtInformation(R));
-  m.push_back(Gaussian::Covariance(Sigma));
-  //m.push_back(Gaussian::Information(Q));
-  m.push_back(Diagonal::Sigmas((Vector(3) << sigma, sigma, sigma).finished()));
-  m.push_back(Diagonal::Variances((Vector(3) << var, var, var).finished()));
-  m.push_back(Diagonal::Precisions((Vector(3) << prc, prc, prc).finished()));
-  m.push_back(Isotropic::Sigma(3, sigma));
-  m.push_back(Isotropic::Variance(3, var));
-  m.push_back(Isotropic::Precision(3, prc));
+  m.push_back(Gaussian::SqrtInformation(R,false));
+  m.push_back(Gaussian::Covariance(kCovariance,false));
+  m.push_back(Gaussian::Information(kCovariance.inverse(),false));
+  m.push_back(Diagonal::Sigmas(kSigmas,false));
+  m.push_back(Diagonal::Variances((Vector(3) << kVariance, kVariance, kVariance).finished(),false));
+  m.push_back(Diagonal::Precisions((Vector(3) << prc, prc, prc).finished(),false));
+  m.push_back(Isotropic::Sigma(3, kSigma,false));
+  m.push_back(Isotropic::Variance(3, kVariance,false));
+  m.push_back(Isotropic::Precision(3, prc,false));

+  // test kSigmas
+  BOOST_FOREACH(Gaussian::shared_ptr mi, m)
+    EXPECT(assert_equal(kSigmas,mi->sigmas()));

   // test whiten
   BOOST_FOREACH(Gaussian::shared_ptr mi, m)
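All of the constructors exercised in this test describe the same isotropic Gaussian with sigma = 2; the standard relations between the parameterizations (stated here for reference, not taken from the sources) are:

\[
\Lambda = R^\top R = \Sigma^{-1}, \qquad
\sigma_i = \sqrt{\Sigma_{ii}}, \qquad
\mathrm{variance}_i = \sigma_i^2, \qquad
\mathrm{precision}_i = 1/\sigma_i^2 .
\]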
@@ -117,9 +122,9 @@ TEST(NoiseModel, equals)
 {
   Gaussian::shared_ptr g1 = Gaussian::SqrtInformation(R),
     g2 = Gaussian::SqrtInformation(eye(3,3));
-  Diagonal::shared_ptr d1 = Diagonal::Sigmas((Vector(3) << sigma, sigma, sigma).finished()),
+  Diagonal::shared_ptr d1 = Diagonal::Sigmas((Vector(3) << kSigma, kSigma, kSigma).finished()),
     d2 = Diagonal::Sigmas(Vector3(0.1, 0.2, 0.3));
-  Isotropic::shared_ptr i1 = Isotropic::Sigma(3, sigma),
+  Isotropic::shared_ptr i1 = Isotropic::Sigma(3, kSigma),
     i2 = Isotropic::Sigma(3, 0.7);

   EXPECT(assert_equal(*g1,*g1));

@@ -155,7 +160,7 @@ TEST(NoiseModel, ConstrainedConstructors )
   Constrained::shared_ptr actual;
   size_t d = 3;
   double m = 100.0;
-  Vector sigmas = (Vector(3) << sigma, 0.0, 0.0).finished();
+  Vector sigmas = (Vector(3) << kSigma, 0.0, 0.0).finished();
   Vector mu = Vector3(200.0, 300.0, 400.0);
   actual = Constrained::All(d);
   // TODO: why should this be a thousand ??? Dummy variable?

@@ -182,7 +187,7 @@ TEST(NoiseModel, ConstrainedMixed )
 {
   Vector feasible = Vector3(1.0, 0.0, 1.0),
     infeasible = Vector3(1.0, 1.0, 1.0);
-  Diagonal::shared_ptr d = Constrained::MixedSigmas((Vector(3) << sigma, 0.0, sigma).finished());
+  Diagonal::shared_ptr d = Constrained::MixedSigmas((Vector(3) << kSigma, 0.0, kSigma).finished());
   // NOTE: we catch constrained variables elsewhere, so whitening does nothing
   EXPECT(assert_equal(Vector3(0.5, 1.0, 0.5),d->whiten(infeasible)));
   EXPECT(assert_equal(Vector3(0.5, 0.0, 0.5),d->whiten(feasible)));

@@ -357,6 +362,44 @@ TEST(NoiseModel, robustNoise)
   DOUBLES_EQUAL(sqrt(k/100.0)*1000.0, A(1,1), 1e-8);
 }

+/* ************************************************************************* */
+#define TEST_GAUSSIAN(gaussian)\
+  EQUALITY(info, gaussian->information());\
+  EQUALITY(cov, gaussian->covariance());\
+  EXPECT(assert_equal(white, gaussian->whiten(e)));\
+  EXPECT(assert_equal(e, gaussian->unwhiten(white)));\
+  EXPECT_DOUBLES_EQUAL(251, gaussian->distance(e), 1e-9);\
+  Matrix A = R.inverse(); Vector b = e;\
+  gaussian->WhitenSystem(A, b);\
+  EXPECT(assert_equal(I, A));\
+  EXPECT(assert_equal(white, b));
+
+TEST(NoiseModel, NonDiagonalGaussian)
+{
+  Matrix3 R;
+  R << 6, 5, 4, 0, 3, 2, 0, 0, 1;
+  const Matrix3 info = R.transpose() * R;
+  const Matrix3 cov = info.inverse();
+  const Vector3 e(1, 1, 1), white = R * e;
+  Matrix I = Matrix3::Identity();
+
+  {
+    SharedGaussian gaussian = Gaussian::SqrtInformation(R);
+    TEST_GAUSSIAN(gaussian);
+  }
+
+  {
+    SharedGaussian gaussian = Gaussian::Information(info);
+    TEST_GAUSSIAN(gaussian);
+  }
+
+  {
+    SharedGaussian gaussian = Gaussian::Covariance(cov);
+    TEST_GAUSSIAN(gaussian);
+  }
+}

 /* ************************************************************************* */
 int main() { TestResult tr; return TestRegistry::runAllTests(tr); }
 /* ************************************************************************* */
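The new NonDiagonalGaussian test checks the usual whitening identities, which for reference (standard definitions) are

\[
x_w = R\,x, \qquad \Lambda = R^\top R, \qquad \Sigma = \Lambda^{-1}, \qquad
x^\top \Lambda\, x = x_w^\top x_w .
\]

For the upper-triangular R used above and e = (1, 1, 1)^T, Re = (15, 5, 1)^T, so the squared whitened norm is 225 + 25 + 1 = 251, which is the value expected from distance(e).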
@@ -17,8 +17,10 @@
 #pragma once

+#include <gtsam/base/Matrix.h>
+#include <gtsam/base/Vector.h>
+#include <gtsam/base/VectorSpace.h>
 #include <boost/serialization/nvp.hpp>
-#include <gtsam/geometry/Pose3.h>

 /*
  * NOTES:

@@ -131,8 +133,8 @@ public:
   /// print with optional string
   void print(const std::string& s = "") const {
     // explicit printing for now.
-    std::cout << s + ".biasAcc [" << biasAcc_.transpose() << "]" << std::endl;
-    std::cout << s + ".biasGyro [" << biasGyro_.transpose() << "]" << std::endl;
+    std::cout << s + ".Acc [" << biasAcc_.transpose() << "]" << std::endl;
+    std::cout << s + ".Gyro [" << biasGyro_.transpose() << "]" << std::endl;
   }

   /** equality up to tolerance */
@@ -44,7 +44,6 @@ ImuFactor::PreintegratedMeasurements::PreintegratedMeasurements(
 //------------------------------------------------------------------------------
 void ImuFactor::PreintegratedMeasurements::print(const string& s) const {
   PreintegrationBase::print(s);
-  cout << " preintMeasCov = \n [ " << preintMeasCov_ << " ]" << endl;
 }

 //------------------------------------------------------------------------------

@@ -75,8 +74,7 @@ void ImuFactor::PreintegratedMeasurements::integrateMeasurement(
   const Rot3 Rincr = Rot3::Expmap(integratedOmega, D_Rincr_integratedOmega); // rotation increment computed from the current rotation rate measurement

   // Update Jacobians
-  updatePreintegratedJacobians(correctedAcc, D_Rincr_integratedOmega, Rincr,
-      deltaT);
+  updatePreintegratedJacobians(correctedAcc, D_Rincr_integratedOmega, Rincr, deltaT);

   // Update preintegrated measurements (also get Jacobian)
   const Matrix3 R_i = deltaRij(); // store this, which is useful to compute G_test

@@ -89,8 +87,8 @@ void ImuFactor::PreintegratedMeasurements::integrateMeasurement(
   // preintMeasCov = F * preintMeasCov * F.transpose() + G * (1/deltaT) * measurementCovariance * G.transpose();
   // NOTE 1: (1/deltaT) allows to pass from continuous time noise to discrete time noise
   // measurementCovariance_discrete = measurementCovariance_contTime * (1/deltaT)
-  // NOTE 2: the computation of G * (1/deltaT) * measurementCovariance * G.transpose() is done blockwise,
-  // as G and measurementCovariance are blockdiagonal matrices
+  // NOTE 2: the computation of G * (1/deltaT) * measurementCovariance * G.transpose() is done block-wise,
+  // as G and measurementCovariance are block-diagonal matrices
   preintMeasCov_ = F * preintMeasCov_ * F.transpose();
   preintMeasCov_.block<3, 3>(0, 0) += integrationCovariance() * deltaT;
   preintMeasCov_.block<3, 3>(3, 3) += R_i * accelerometerCovariance()
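Spelling out the update described in the comments above (with Sigma_c the continuous-time measurement covariance and Delta t the sampling interval; this is a paraphrase of the comment, not new code):

\[
P_{k+1} = F\,P_k\,F^\top + G\,\Sigma_{\mathrm{discrete}}\,G^\top,
\qquad
\Sigma_{\mathrm{discrete}} = \frac{\Sigma_c}{\Delta t},
\]

evaluated block-wise because G and the measurement covariance are block-diagonal.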
@@ -156,7 +154,8 @@ void ImuFactor::print(const string& s, const KeyFormatter& keyFormatter) const {
       << ")\n";
   ImuFactorBase::print("");
   _PIM_.print(" preintegrated measurements:");
-  this->noiseModel_->print(" noise model: ");
+  // Print standard deviations on covariance only
+  cout << " noise model sigmas: " << this->noiseModel_->sigmas().transpose() << endl;
 }

 //------------------------------------------------------------------------------

@@ -110,7 +110,7 @@ public:
    * Add a single IMU measurement to the preintegration.
    * @param measuredAcc Measured acceleration (in body frame, as given by the sensor)
    * @param measuredOmega Measured angular velocity (as given by the sensor)
-   * @param deltaT Time interval between two consecutive IMU measurements
+   * @param deltaT Time interval between this and the last IMU measurement
    * @param body_P_sensor Optional sensor frame (pose of the IMU in the body frame)
    * @param Fout, Gout Jacobians used internally (only needed for testing)
    */

@@ -69,11 +69,8 @@ public:
   /// Needed for testable
   void print(const std::string& s) const {
     std::cout << s << std::endl;
-    std::cout << "deltaTij [" << deltaTij_ << "]" << std::endl;
-    deltaRij_.print(" deltaRij ");
-    std::cout << "delRdelBiasOmega [" << delRdelBiasOmega_ << "]" << std::endl;
-    std::cout << "gyroscopeCovariance [" << gyroscopeCovariance_ << "]"
-        << std::endl;
+    std::cout << " deltaTij [" << deltaTij_ << "]" << std::endl;
+    std::cout << " deltaRij.ypr = (" << deltaRij_.ypr().transpose() << ")" << std::endl;
   }

   /// Needed for testable
@@ -0,0 +1,357 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file PreintegrationBase.h
 * @author Luca Carlone
 * @author Stephen Williams
 * @author Richard Roberts
 * @author Vadim Indelman
 * @author David Jensen
 * @author Frank Dellaert
 **/

#include "PreintegrationBase.h"

namespace gtsam {

PreintegrationBase::PreintegrationBase(const imuBias::ConstantBias& bias,
                                       const Matrix3& measuredAccCovariance,
                                       const Matrix3& measuredOmegaCovariance,
                                       const Matrix3&integrationErrorCovariance,
                                       const bool use2ndOrderIntegration)
    : PreintegratedRotation(measuredOmegaCovariance),
      biasHat_(bias),
      use2ndOrderIntegration_(use2ndOrderIntegration),
      deltaPij_(Vector3::Zero()),
      deltaVij_(Vector3::Zero()),
      delPdelBiasAcc_(Z_3x3),
      delPdelBiasOmega_(Z_3x3),
      delVdelBiasAcc_(Z_3x3),
      delVdelBiasOmega_(Z_3x3),
      accelerometerCovariance_(measuredAccCovariance),
      integrationCovariance_(integrationErrorCovariance) {
}

/// Needed for testable
void PreintegrationBase::print(const std::string& s) const {
  PreintegratedRotation::print(s);
  std::cout << " deltaPij [ " << deltaPij_.transpose() << " ]" << std::endl;
  std::cout << " deltaVij [ " << deltaVij_.transpose() << " ]" << std::endl;
  biasHat_.print(" biasHat");
}

/// Needed for testable
bool PreintegrationBase::equals(const PreintegrationBase& other, double tol) const {
  return PreintegratedRotation::equals(other, tol) && biasHat_.equals(other.biasHat_, tol)
      && equal_with_abs_tol(deltaPij_, other.deltaPij_, tol)
      && equal_with_abs_tol(deltaVij_, other.deltaVij_, tol)
      && equal_with_abs_tol(delPdelBiasAcc_, other.delPdelBiasAcc_, tol)
      && equal_with_abs_tol(delPdelBiasOmega_, other.delPdelBiasOmega_, tol)
      && equal_with_abs_tol(delVdelBiasAcc_, other.delVdelBiasAcc_, tol)
      && equal_with_abs_tol(delVdelBiasOmega_, other.delVdelBiasOmega_, tol)
      && equal_with_abs_tol(accelerometerCovariance_, other.accelerometerCovariance_, tol)
      && equal_with_abs_tol(integrationCovariance_, other.integrationCovariance_, tol);
}

/// Re-initialize PreintegratedMeasurements
void PreintegrationBase::resetIntegration() {
  PreintegratedRotation::resetIntegration();
  deltaPij_ = Vector3::Zero();
  deltaVij_ = Vector3::Zero();
  delPdelBiasAcc_ = Z_3x3;
  delPdelBiasOmega_ = Z_3x3;
  delVdelBiasAcc_ = Z_3x3;
  delVdelBiasOmega_ = Z_3x3;
}

/// Update preintegrated measurements
void PreintegrationBase::updatePreintegratedMeasurements(const Vector3& correctedAcc,
                                                         const Rot3& incrR, const double deltaT,
                                                         OptionalJacobian<9, 9> F) {

  const Matrix3 dRij = deltaRij(); // expensive
  const Vector3 temp = dRij * correctedAcc * deltaT;
  if (!use2ndOrderIntegration_) {
    deltaPij_ += deltaVij_ * deltaT;
  } else {
    deltaPij_ += deltaVij_ * deltaT + 0.5 * temp * deltaT;
  }
  deltaVij_ += temp;

  Matrix3 R_i, F_angles_angles;
  if (F)
    R_i = deltaRij(); // has to be executed before updateIntegratedRotationAndDeltaT as that updates deltaRij
  updateIntegratedRotationAndDeltaT(incrR, deltaT, F ? &F_angles_angles : 0);

  if (F) {
    const Matrix3 F_vel_angles = -R_i * skewSymmetric(correctedAcc) * deltaT;
    Matrix3 F_pos_angles;
    if (use2ndOrderIntegration_)
      F_pos_angles = 0.5 * F_vel_angles * deltaT;
    else
      F_pos_angles = Z_3x3;

    //    pos           vel              angle
    *F << //
        I_3x3, I_3x3 * deltaT, F_pos_angles,    // pos
        Z_3x3, I_3x3,          F_vel_angles,    // vel
        Z_3x3, Z_3x3,          F_angles_angles; // angle
  }
}

/// Update Jacobians to be used during preintegration
void PreintegrationBase::updatePreintegratedJacobians(const Vector3& correctedAcc,
                                                      const Matrix3& D_Rincr_integratedOmega,
                                                      const Rot3& incrR, double deltaT) {
  const Matrix3 dRij = deltaRij(); // expensive
  const Matrix3 temp = -dRij * skewSymmetric(correctedAcc) * deltaT * delRdelBiasOmega();
  if (!use2ndOrderIntegration_) {
    delPdelBiasAcc_ += delVdelBiasAcc_ * deltaT;
    delPdelBiasOmega_ += delVdelBiasOmega_ * deltaT;
  } else {
    delPdelBiasAcc_ += delVdelBiasAcc_ * deltaT - 0.5 * dRij * deltaT * deltaT;
    delPdelBiasOmega_ += deltaT * (delVdelBiasOmega_ + temp * 0.5);
  }
  delVdelBiasAcc_ += -dRij * deltaT;
  delVdelBiasOmega_ += temp;
  update_delRdelBiasOmega(D_Rincr_integratedOmega, incrR, deltaT);
}

void PreintegrationBase::correctMeasurementsByBiasAndSensorPose(
    const Vector3& measuredAcc, const Vector3& measuredOmega, Vector3& correctedAcc,
    Vector3& correctedOmega, boost::optional<const Pose3&> body_P_sensor) {
  correctedAcc = biasHat_.correctAccelerometer(measuredAcc);
  correctedOmega = biasHat_.correctGyroscope(measuredOmega);

  // Then compensate for sensor-body displacement: we express the quantities
  // (originally in the IMU frame) into the body frame
  if (body_P_sensor) {
    Matrix3 body_R_sensor = body_P_sensor->rotation().matrix();
    correctedOmega = body_R_sensor * correctedOmega; // rotation rate vector in the body frame
    Matrix3 body_omega_body__cross = skewSymmetric(correctedOmega);
    correctedAcc = body_R_sensor * correctedAcc
        - body_omega_body__cross * body_omega_body__cross * body_P_sensor->translation().vector();
    // linear acceleration vector in the body frame
  }
}

/// Predict state at time j
//------------------------------------------------------------------------------
PoseVelocityBias PreintegrationBase::predict(
    const Pose3& pose_i, const Vector3& vel_i, const imuBias::ConstantBias& bias_i,
    const Vector3& gravity, const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis,
    boost::optional<Vector3&> deltaPij_biascorrected_out,
    boost::optional<Vector3&> deltaVij_biascorrected_out) const {

  const imuBias::ConstantBias biasIncr = bias_i - biasHat();
  const Vector3& biasAccIncr = biasIncr.accelerometer();
  const Vector3& biasOmegaIncr = biasIncr.gyroscope();

  const Rot3& Rot_i = pose_i.rotation();
  const Matrix3 Rot_i_matrix = Rot_i.matrix();
  const Vector3 pos_i = pose_i.translation().vector();

  // Predict state at time j
  /* ---------------------------------------------------------------------------------------------------- */
  const Vector3 deltaPij_biascorrected = deltaPij() + delPdelBiasAcc() * biasAccIncr
      + delPdelBiasOmega() * biasOmegaIncr;
  if (deltaPij_biascorrected_out) // if desired, store this
    *deltaPij_biascorrected_out = deltaPij_biascorrected;

  const double dt = deltaTij(), dt2 = dt * dt;
  Vector3 pos_j = pos_i + Rot_i_matrix * deltaPij_biascorrected + vel_i * dt
      - omegaCoriolis.cross(vel_i) * dt2 // Coriolis term - we got rid of the 2 wrt ins paper
      + 0.5 * gravity * dt2;

  const Vector3 deltaVij_biascorrected = deltaVij() + delVdelBiasAcc() * biasAccIncr
      + delVdelBiasOmega() * biasOmegaIncr;
  if (deltaVij_biascorrected_out) // if desired, store this
    *deltaVij_biascorrected_out = deltaVij_biascorrected;

  Vector3 vel_j = Vector3(
      vel_i + Rot_i_matrix * deltaVij_biascorrected - 2 * omegaCoriolis.cross(vel_i) * dt // Coriolis term
      + gravity * dt);

  if (use2ndOrderCoriolis) {
    pos_j += -0.5 * omegaCoriolis.cross(omegaCoriolis.cross(pos_i)) * dt2; // 2nd order coriolis term for position
    vel_j += -omegaCoriolis.cross(omegaCoriolis.cross(pos_i)) * dt; // 2nd order term for velocity
  }

  const Rot3 deltaRij_biascorrected = biascorrectedDeltaRij(biasOmegaIncr);
  // deltaRij_biascorrected = deltaRij * expmap(delRdelBiasOmega * biasOmegaIncr)

  const Vector3 biascorrectedOmega = Rot3::Logmap(deltaRij_biascorrected);
  const Vector3 correctedOmega = biascorrectedOmega
      - Rot_i_matrix.transpose() * omegaCoriolis * dt; // Coriolis term
  const Rot3 correctedDeltaRij = Rot3::Expmap(correctedOmega);
  const Rot3 Rot_j = Rot_i.compose(correctedDeltaRij);

  const Pose3 pose_j = Pose3(Rot_j, Point3(pos_j));
  return PoseVelocityBias(pose_j, vel_j, bias_i); // bias is predicted as a constant
}

/// Compute errors w.r.t. preintegrated measurements and Jacobians wrpt pose_i, vel_i, bias_i, pose_j, bias_j
//------------------------------------------------------------------------------
Vector9 PreintegrationBase::computeErrorAndJacobians(const Pose3& pose_i, const Vector3& vel_i,
                                                     const Pose3& pose_j, const Vector3& vel_j,
                                                     const imuBias::ConstantBias& bias_i,
                                                     const Vector3& gravity,
                                                     const Vector3& omegaCoriolis,
                                                     const bool use2ndOrderCoriolis,
                                                     OptionalJacobian<9, 6> H1,
                                                     OptionalJacobian<9, 3> H2,
                                                     OptionalJacobian<9, 6> H3,
                                                     OptionalJacobian<9, 3> H4,
                                                     OptionalJacobian<9, 6> H5) const {

  // We need the mismatch w.r.t. the biases used for preintegration
  const Vector3 biasOmegaIncr = bias_i.gyroscope() - biasHat().gyroscope();

  // we give some shorter name to rotations and translations
  const Rot3& Ri = pose_i.rotation();
  const Matrix3 Ri_transpose_matrix = Ri.transpose();

  const Rot3& Rj = pose_j.rotation();
  const Vector3 pos_j = pose_j.translation().vector();

  // Evaluate residual error, according to [3]
  /* ---------------------------------------------------------------------------------------------------- */
  Vector3 deltaPij_biascorrected, deltaVij_biascorrected;
  PoseVelocityBias predictedState_j = predict(pose_i, vel_i, bias_i, gravity, omegaCoriolis,
                                              use2ndOrderCoriolis, deltaPij_biascorrected,
                                              deltaVij_biascorrected);

  // Ri.transpose() is important here to preserve a model with *additive* Gaussian noise of correct covariance
  const Vector3 fp = Ri_transpose_matrix * (pos_j - predictedState_j.pose.translation().vector());

  // Ri.transpose() is important here to preserve a model with *additive* Gaussian noise of correct covariance
  const Vector3 fv = Ri_transpose_matrix * (vel_j - predictedState_j.velocity);

  // fR will be computed later. Note: it is the same as: fR = (predictedState_j.pose.translation()).between(Rot_j)

  /* ---------------------------------------------------------------------------------------------------- */
  // Get Get so<3> version of bias corrected rotation
  // If H5 is asked for, we will need the Jacobian, which we store in H5
  // H5 will then be corrected below to take into account the Coriolis effect
  Matrix3 D_cThetaRij_biasOmegaIncr;
  const Vector3 biascorrectedOmega = biascorrectedThetaRij(biasOmegaIncr,
                                                           H5 ? &D_cThetaRij_biasOmegaIncr : 0);

  // Coriolis term, note inconsistent with AHRS, where coriolisHat is *after* integration
  const Vector3 coriolis = integrateCoriolis(Ri, omegaCoriolis);
  const Vector3 correctedOmega = biascorrectedOmega - coriolis;

  // Calculate Jacobians, matrices below needed only for some Jacobians...
  Vector3 fR;
  Matrix3 D_cDeltaRij_cOmega, D_coriolis, D_fR_fRrot, Ritranspose_omegaCoriolisHat;

  if (H1 || H2)
    Ritranspose_omegaCoriolisHat = Ri_transpose_matrix * skewSymmetric(omegaCoriolis);

  // This is done to save computation: we ask for the jacobians only when they are needed
  const double dt = deltaTij(), dt2 = dt*dt;
  Rot3 fRrot;
  const Rot3 RiBetweenRj = Rot3(Ri_transpose_matrix) * Rj;
  if (H1 || H2 || H3 || H4 || H5) {
    const Rot3 correctedDeltaRij = Rot3::Expmap(correctedOmega, D_cDeltaRij_cOmega);
    // Residual rotation error
    fRrot = correctedDeltaRij.between(RiBetweenRj);
    fR = Rot3::Logmap(fRrot, D_fR_fRrot);
    D_coriolis = -D_cDeltaRij_cOmega * skewSymmetric(coriolis);
  } else {
    const Rot3 correctedDeltaRij = Rot3::Expmap(correctedOmega);
    // Residual rotation error
    fRrot = correctedDeltaRij.between(RiBetweenRj);
    fR = Rot3::Logmap(fRrot);
  }
  if (H1) {
    Matrix3 dfPdPi = -I_3x3, dfVdPi = Z_3x3;
    if (use2ndOrderCoriolis) {
      // this is the same as: Ri.transpose() * omegaCoriolisHat * omegaCoriolisHat * Ri.matrix()
      const Matrix3 temp = Ritranspose_omegaCoriolisHat
          * (-Ritranspose_omegaCoriolisHat.transpose());
      dfPdPi += 0.5 * temp * dt2;
      dfVdPi += temp * dt;
    }
    (*H1) <<
        // dfP/dRi
        skewSymmetric(fp + deltaPij_biascorrected),
        // dfP/dPi
        dfPdPi,
        // dfV/dRi
        skewSymmetric(fv + deltaVij_biascorrected),
        // dfV/dPi
        dfVdPi,
        // dfR/dRi
        D_fR_fRrot * (-Rj.between(Ri).matrix() - fRrot.inverse().matrix() * D_coriolis),
        // dfR/dPi
        Z_3x3;
  }
  if (H2) {
    (*H2) <<
        // dfP/dVi
        -Ri_transpose_matrix * dt + Ritranspose_omegaCoriolisHat * dt2, // Coriolis term - we got rid of the 2 wrt ins paper
        // dfV/dVi
        -Ri_transpose_matrix + 2 * Ritranspose_omegaCoriolisHat * dt, // Coriolis term
        // dfR/dVi
        Z_3x3;
  }
  if (H3) {
    (*H3) <<
        // dfP/dPosej
        Z_3x3, Ri_transpose_matrix * Rj.matrix(),
        // dfV/dPosej
        Matrix::Zero(3, 6),
        // dfR/dPosej
        D_fR_fRrot, Z_3x3;
  }
  if (H4) {
    (*H4) <<
        // dfP/dVj
        Z_3x3,
        // dfV/dVj
        Ri_transpose_matrix,
        // dfR/dVj
        Z_3x3;
  }
  if (H5) {
    // H5 by this point already contains 3*3 biascorrectedThetaRij derivative
    const Matrix3 JbiasOmega = D_cDeltaRij_cOmega * D_cThetaRij_biasOmegaIncr;
    (*H5) <<
        // dfP/dBias
        -delPdelBiasAcc(), -delPdelBiasOmega(),
        // dfV/dBias
        -delVdelBiasAcc(), -delVdelBiasOmega(),
        // dfR/dBias
        Z_3x3, D_fR_fRrot * (-fRrot.inverse().matrix() * JbiasOmega);
  }
  Vector9 r;
  r << fp, fv, fR;
  return r;
}

ImuBase::ImuBase()
    : gravity_(Vector3(0.0, 0.0, 9.81)),
      omegaCoriolis_(Vector3(0.0, 0.0, 0.0)),
      body_P_sensor_(boost::none),
      use2ndOrderCoriolis_(false) {
}

ImuBase::ImuBase(const Vector3& gravity, const Vector3& omegaCoriolis,
                 boost::optional<const Pose3&> body_P_sensor, const bool use2ndOrderCoriolis)
    : gravity_(gravity),
      omegaCoriolis_(omegaCoriolis),
      body_P_sensor_(body_P_sensor),
      use2ndOrderCoriolis_(use2ndOrderCoriolis) {
}

} /// namespace gtsam
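Written out, the prediction implemented by predict() above amounts to the following (bias corrections of the preintegrated quantities and the optional second-order Coriolis terms are omitted for brevity; Delta t = deltaTij, g = gravity, omega_c = omegaCoriolis):

\[
\begin{aligned}
p_j &= p_i + R_i\,\Delta p_{ij} + v_i\,\Delta t - (\omega_c \times v_i)\,\Delta t^2 + \tfrac{1}{2}\,g\,\Delta t^2,\\
v_j &= v_i + R_i\,\Delta v_{ij} - 2\,(\omega_c \times v_i)\,\Delta t + g\,\Delta t,\\
R_j &= R_i\,\mathrm{Exp}\!\left(\mathrm{Log}(\Delta R_{ij}) - R_i^\top \omega_c\,\Delta t\right).
\end{aligned}
\]

A hypothetical call site, purely illustrative (the pim argument stands for any object deriving from PreintegrationBase, e.g. ImuFactor::PreintegratedMeasurements; the gravity and Coriolis values merely mirror the ImuBase defaults):

#include <gtsam/navigation/ImuFactor.h>

using namespace gtsam;

PoseVelocityBias predictExample(const ImuFactor::PreintegratedMeasurements& pim,
                                const Pose3& pose_i, const Vector3& vel_i,
                                const imuBias::ConstantBias& bias_i) {
  const Vector3 gravity(0.0, 0.0, 9.81);       // same default as ImuBase
  const Vector3 omegaCoriolis(0.0, 0.0, 0.0);  // no Coriolis correction
  return pim.predict(pose_i, vel_i, bias_i, gravity, omegaCoriolis);
}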
@ -23,6 +23,9 @@
|
||||||
|
|
||||||
#include <gtsam/navigation/PreintegratedRotation.h>
|
#include <gtsam/navigation/PreintegratedRotation.h>
|
||||||
#include <gtsam/navigation/ImuBias.h>
|
#include <gtsam/navigation/ImuBias.h>
|
||||||
|
#include <gtsam/geometry/Pose3.h>
|
||||||
|
#include <gtsam/base/Matrix.h>
|
||||||
|
#include <gtsam/base/Vector.h>
|
||||||
|
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
|
@ -34,9 +37,10 @@ struct PoseVelocityBias {
|
||||||
Vector3 velocity;
|
Vector3 velocity;
|
||||||
imuBias::ConstantBias bias;
|
imuBias::ConstantBias bias;
|
||||||
|
|
||||||
PoseVelocityBias(const Pose3& _pose, const Vector3& _velocity,
|
PoseVelocityBias(const Pose3& _pose, const Vector3& _velocity, const imuBias::ConstantBias _bias)
|
||||||
const imuBias::ConstantBias _bias) :
|
: pose(_pose),
|
||||||
pose(_pose), velocity(_velocity), bias(_bias) {
|
velocity(_velocity),
|
||||||
|
bias(_bias) {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -46,24 +50,24 @@ struct PoseVelocityBias {
|
||||||
* It includes the definitions of the preintegrated variables and the methods
|
* It includes the definitions of the preintegrated variables and the methods
|
||||||
* to access, print, and compare them.
|
* to access, print, and compare them.
|
||||||
*/
|
*/
|
||||||
class PreintegrationBase: public PreintegratedRotation {
|
class PreintegrationBase : public PreintegratedRotation {
|
||||||
|
|
||||||
imuBias::ConstantBias biasHat_; ///< Acceleration and angular rate bias values used during preintegration
|
imuBias::ConstantBias biasHat_; ///< Acceleration and angular rate bias values used during preintegration
|
||||||
bool use2ndOrderIntegration_; ///< Controls the order of integration
|
bool use2ndOrderIntegration_; ///< Controls the order of integration
|
||||||
|
|
||||||
Vector3 deltaPij_; ///< Preintegrated relative position (does not take into account velocity at time i, see deltap+, in [2]) (in frame i)
|
Vector3 deltaPij_; ///< Preintegrated relative position (does not take into account velocity at time i, see deltap+, in [2]) (in frame i)
|
||||||
Vector3 deltaVij_; ///< Preintegrated relative velocity (in global frame)
|
Vector3 deltaVij_; ///< Preintegrated relative velocity (in global frame)
|
||||||
|
|
||||||
Matrix3 delPdelBiasAcc_; ///< Jacobian of preintegrated position w.r.t. acceleration bias
|
Matrix3 delPdelBiasAcc_; ///< Jacobian of preintegrated position w.r.t. acceleration bias
|
||||||
Matrix3 delPdelBiasOmega_; ///< Jacobian of preintegrated position w.r.t. angular rate bias
|
Matrix3 delPdelBiasOmega_; ///< Jacobian of preintegrated position w.r.t. angular rate bias
|
||||||
Matrix3 delVdelBiasAcc_; ///< Jacobian of preintegrated velocity w.r.t. acceleration bias
|
Matrix3 delVdelBiasAcc_; ///< Jacobian of preintegrated velocity w.r.t. acceleration bias
|
||||||
Matrix3 delVdelBiasOmega_; ///< Jacobian of preintegrated velocity w.r.t. angular rate bias
|
Matrix3 delVdelBiasOmega_; ///< Jacobian of preintegrated velocity w.r.t. angular rate bias
|
||||||
|
|
||||||
Matrix3 accelerometerCovariance_; ///< continuous-time "Covariance" of accelerometer measurements
|
const Matrix3 accelerometerCovariance_; ///< continuous-time "Covariance" of accelerometer measurements
|
||||||
Matrix3 integrationCovariance_; ///< continuous-time "Covariance" describing integration uncertainty
|
const Matrix3 integrationCovariance_; ///< continuous-time "Covariance" describing integration uncertainty
|
||||||
/// (to compensate errors in Euler integration)
|
/// (to compensate errors in Euler integration)
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Default constructor, initializes the variables in the base class
|
* Default constructor, initializes the variables in the base class
|
||||||
|
@ -71,18 +75,9 @@ public:
|
||||||
* @param use2ndOrderIntegration Controls the order of integration
|
* @param use2ndOrderIntegration Controls the order of integration
|
||||||
* (if false: p(t+1) = p(t) + v(t) deltaT ; if true: p(t+1) = p(t) + v(t) deltaT + 0.5 * acc(t) deltaT^2)
|
* (if false: p(t+1) = p(t) + v(t) deltaT ; if true: p(t+1) = p(t) + v(t) deltaT + 0.5 * acc(t) deltaT^2)
|
||||||
*/
|
*/
|
||||||
PreintegrationBase(const imuBias::ConstantBias& bias,
|
PreintegrationBase(const imuBias::ConstantBias& bias, const Matrix3& measuredAccCovariance,
|
||||||
const Matrix3& measuredAccCovariance,
|
const Matrix3& measuredOmegaCovariance,
|
||||||
const Matrix3& measuredOmegaCovariance,
|
const Matrix3&integrationErrorCovariance, const bool use2ndOrderIntegration);
|
||||||
const Matrix3&integrationErrorCovariance,
|
|
||||||
const bool use2ndOrderIntegration) :
|
|
||||||
PreintegratedRotation(measuredOmegaCovariance), biasHat_(bias), use2ndOrderIntegration_(
|
|
||||||
use2ndOrderIntegration), deltaPij_(Vector3::Zero()), deltaVij_(
|
|
||||||
Vector3::Zero()), delPdelBiasAcc_(Z_3x3), delPdelBiasOmega_(Z_3x3), delVdelBiasAcc_(
|
|
||||||
Z_3x3), delVdelBiasOmega_(Z_3x3), accelerometerCovariance_(
|
|
||||||
measuredAccCovariance), integrationCovariance_(
|
|
||||||
integrationErrorCovariance) {
|
|
||||||
}
|
|
||||||
|
|
||||||
/// methods to access class variables
|
/// methods to access class variables
|
||||||
bool use2ndOrderIntegration() const {
|
bool use2ndOrderIntegration() const {
|
||||||
|
@ -99,7 +94,7 @@ public:
|
||||||
}
|
}
|
||||||
Vector6 biasHatVector() const {
|
Vector6 biasHatVector() const {
|
||||||
return biasHat_.vector();
|
return biasHat_.vector();
|
||||||
} // expensive
|
} // expensive
|
||||||
const Matrix3& delPdelBiasAcc() const {
|
const Matrix3& delPdelBiasAcc() const {
|
||||||
return delPdelBiasAcc_;
|
return delPdelBiasAcc_;
|
||||||
}
|
}
|
||||||
|
@ -120,325 +115,48 @@ public:
|
||||||
return integrationCovariance_;
|
return integrationCovariance_;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Needed for testable
|
/// print
|
||||||
void print(const std::string& s) const {
|
void print(const std::string& s) const;
|
||||||
PreintegratedRotation::print(s);
|
|
||||||
std::cout << " accelerometerCovariance [ " << accelerometerCovariance_
|
|
||||||
<< " ]" << std::endl;
|
|
||||||
std::cout << " integrationCovariance [ " << integrationCovariance_ << " ]"
|
|
||||||
<< std::endl;
|
|
||||||
std::cout << " deltaPij [ " << deltaPij_.transpose() << " ]" << std::endl;
|
|
||||||
std::cout << " deltaVij [ " << deltaVij_.transpose() << " ]" << std::endl;
|
|
||||||
biasHat_.print(" biasHat");
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Needed for testable
|
/// check equality
|
||||||
bool equals(const PreintegrationBase& other, double tol) const {
|
bool equals(const PreintegrationBase& other, double tol) const;
|
||||||
return PreintegratedRotation::equals(other, tol)
|
|
||||||
&& biasHat_.equals(other.biasHat_, tol)
|
|
||||||
&& equal_with_abs_tol(deltaPij_, other.deltaPij_, tol)
|
|
||||||
&& equal_with_abs_tol(deltaVij_, other.deltaVij_, tol)
|
|
||||||
&& equal_with_abs_tol(delPdelBiasAcc_, other.delPdelBiasAcc_, tol)
|
|
||||||
&& equal_with_abs_tol(delPdelBiasOmega_, other.delPdelBiasOmega_, tol)
|
|
||||||
&& equal_with_abs_tol(delVdelBiasAcc_, other.delVdelBiasAcc_, tol)
|
|
||||||
&& equal_with_abs_tol(delVdelBiasOmega_, other.delVdelBiasOmega_, tol)
|
|
||||||
&& equal_with_abs_tol(accelerometerCovariance_,
|
|
||||||
other.accelerometerCovariance_, tol)
|
|
||||||
&& equal_with_abs_tol(integrationCovariance_,
|
|
||||||
other.integrationCovariance_, tol);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Re-initialize PreintegratedMeasurements
|
/// Re-initialize PreintegratedMeasurements
|
||||||
void resetIntegration() {
|
void resetIntegration();
|
||||||
PreintegratedRotation::resetIntegration();
|
|
||||||
deltaPij_ = Vector3::Zero();
|
|
||||||
deltaVij_ = Vector3::Zero();
|
|
||||||
delPdelBiasAcc_ = Z_3x3;
|
|
||||||
delPdelBiasOmega_ = Z_3x3;
|
|
||||||
delVdelBiasAcc_ = Z_3x3;
|
|
||||||
delVdelBiasOmega_ = Z_3x3;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Update preintegrated measurements
|
/// Update preintegrated measurements
|
||||||
void updatePreintegratedMeasurements(const Vector3& correctedAcc,
|
void updatePreintegratedMeasurements(const Vector3& correctedAcc, const Rot3& incrR,
|
||||||
const Rot3& incrR, const double deltaT, OptionalJacobian<9, 9> F) {
|
const double deltaT, OptionalJacobian<9, 9> F);
|
||||||
|
|
||||||
Matrix3 dRij = deltaRij(); // expensive
|
|
||||||
Vector3 temp = dRij * correctedAcc * deltaT;
|
|
||||||
if (!use2ndOrderIntegration_) {
|
|
||||||
deltaPij_ += deltaVij_ * deltaT;
|
|
||||||
} else {
|
|
||||||
deltaPij_ += deltaVij_ * deltaT + 0.5 * temp * deltaT;
|
|
||||||
}
|
|
||||||
deltaVij_ += temp;
|
|
||||||
|
|
||||||
Matrix3 R_i, F_angles_angles;
|
|
||||||
if (F)
|
|
||||||
R_i = deltaRij(); // has to be executed before updateIntegratedRotationAndDeltaT as that updates deltaRij
|
|
||||||
updateIntegratedRotationAndDeltaT(incrR, deltaT, F ? &F_angles_angles : 0);
|
|
||||||
|
|
||||||
if (F) {
|
|
||||||
Matrix3 F_vel_angles = -R_i * skewSymmetric(correctedAcc) * deltaT;
|
|
||||||
Matrix3 F_pos_angles;
|
|
||||||
if (use2ndOrderIntegration_)
|
|
||||||
F_pos_angles = 0.5 * F_vel_angles * deltaT;
|
|
||||||
else
|
|
||||||
F_pos_angles = Z_3x3;
|
|
||||||
|
|
||||||
// pos vel angle
|
|
||||||
*F << //
|
|
||||||
I_3x3, I_3x3 * deltaT, F_pos_angles, // pos
|
|
||||||
Z_3x3, I_3x3, F_vel_angles, // vel
|
|
||||||
Z_3x3, Z_3x3, F_angles_angles; // angle
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Update Jacobians to be used during preintegration
|
/// Update Jacobians to be used during preintegration
|
||||||
void updatePreintegratedJacobians(const Vector3& correctedAcc,
|
void updatePreintegratedJacobians(const Vector3& correctedAcc,
|
||||||
const Matrix3& D_Rincr_integratedOmega, const Rot3& incrR,
|
const Matrix3& D_Rincr_integratedOmega, const Rot3& incrR,
|
||||||
double deltaT) {
|
double deltaT);
|
||||||
Matrix3 dRij = deltaRij(); // expensive
|
|
||||||
Matrix3 temp = -dRij * skewSymmetric(correctedAcc) * deltaT
|
|
||||||
* delRdelBiasOmega();
|
|
||||||
if (!use2ndOrderIntegration_) {
|
|
||||||
delPdelBiasAcc_ += delVdelBiasAcc_ * deltaT;
|
|
||||||
delPdelBiasOmega_ += delVdelBiasOmega_ * deltaT;
|
|
||||||
} else {
|
|
||||||
delPdelBiasAcc_ += delVdelBiasAcc_ * deltaT
|
|
||||||
- 0.5 * dRij * deltaT * deltaT;
|
|
||||||
delPdelBiasOmega_ += deltaT * (delVdelBiasOmega_ + temp * 0.5);
|
|
||||||
}
|
|
||||||
delVdelBiasAcc_ += -dRij * deltaT;
|
|
||||||
delVdelBiasOmega_ += temp;
|
|
||||||
update_delRdelBiasOmega(D_Rincr_integratedOmega, incrR, deltaT);
|
|
||||||
}
|
|
||||||
|
|
||||||
void correctMeasurementsByBiasAndSensorPose(const Vector3& measuredAcc,
|
void correctMeasurementsByBiasAndSensorPose(const Vector3& measuredAcc,
|
||||||
const Vector3& measuredOmega, Vector3& correctedAcc,
|
const Vector3& measuredOmega, Vector3& correctedAcc,
|
||||||
Vector3& correctedOmega, boost::optional<const Pose3&> body_P_sensor) {
|
Vector3& correctedOmega,
|
||||||
correctedAcc = biasHat_.correctAccelerometer(measuredAcc);
|
boost::optional<const Pose3&> body_P_sensor);
|
||||||
correctedOmega = biasHat_.correctGyroscope(measuredOmega);
|
|
||||||
|
|
||||||
// Then compensate for sensor-body displacement: we express the quantities
|
|
||||||
// (originally in the IMU frame) into the body frame
|
|
||||||
if (body_P_sensor) {
|
|
||||||
Matrix3 body_R_sensor = body_P_sensor->rotation().matrix();
|
|
||||||
correctedOmega = body_R_sensor * correctedOmega; // rotation rate vector in the body frame
|
|
||||||
Matrix3 body_omega_body__cross = skewSymmetric(correctedOmega);
|
|
||||||
correctedAcc = body_R_sensor * correctedAcc
|
|
||||||
- body_omega_body__cross * body_omega_body__cross
|
|
||||||
* body_P_sensor->translation().vector();
|
|
||||||
// linear acceleration vector in the body frame
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Predict state at time j
|
/// Predict state at time j
|
||||||
//------------------------------------------------------------------------------
|
PoseVelocityBias predict(
|
||||||
PoseVelocityBias predict(const Pose3& pose_i, const Vector3& vel_i,
|
const Pose3& pose_i, const Vector3& vel_i, const imuBias::ConstantBias& bias_i,
|
||||||
const imuBias::ConstantBias& bias_i, const Vector3& gravity,
|
const Vector3& gravity, const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false,
|
||||||
const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis = false,
|
|
||||||
boost::optional<Vector3&> deltaPij_biascorrected_out = boost::none,
|
boost::optional<Vector3&> deltaPij_biascorrected_out = boost::none,
|
||||||
boost::optional<Vector3&> deltaVij_biascorrected_out = boost::none) const {
|
boost::optional<Vector3&> deltaVij_biascorrected_out = boost::none) const;
|
||||||
|
|
||||||
const imuBias::ConstantBias biasIncr = bias_i - biasHat();
|
|
||||||
const Vector3& biasAccIncr = biasIncr.accelerometer();
|
|
||||||
const Vector3& biasOmegaIncr = biasIncr.gyroscope();
|
|
||||||
|
|
||||||
const Rot3& Rot_i = pose_i.rotation();
|
|
||||||
const Matrix3& Rot_i_matrix = Rot_i.matrix();
|
|
||||||
const Vector3& pos_i = pose_i.translation().vector();
|
|
||||||
|
|
||||||
// Predict state at time j
|
|
||||||
/* ---------------------------------------------------------------------------------------------------- */
|
|
||||||
Vector3 deltaPij_biascorrected = deltaPij() + delPdelBiasAcc() * biasAccIncr
|
|
||||||
+ delPdelBiasOmega() * biasOmegaIncr;
|
|
||||||
if (deltaPij_biascorrected_out) // if desired, store this
|
|
||||||
*deltaPij_biascorrected_out = deltaPij_biascorrected;
|
|
||||||
|
|
||||||
Vector3 pos_j = pos_i + Rot_i_matrix * deltaPij_biascorrected
|
|
||||||
+ vel_i * deltaTij()
|
|
||||||
- omegaCoriolis.cross(vel_i) * deltaTij() * deltaTij() // Coriolis term - we got rid of the 2 wrt ins paper
|
|
||||||
+ 0.5 * gravity * deltaTij() * deltaTij();
|
|
||||||
|
|
||||||
Vector3 deltaVij_biascorrected = deltaVij() + delVdelBiasAcc() * biasAccIncr
|
|
||||||
+ delVdelBiasOmega() * biasOmegaIncr;
|
|
||||||
if (deltaVij_biascorrected_out) // if desired, store this
|
|
||||||
*deltaVij_biascorrected_out = deltaVij_biascorrected;
|
|
||||||
|
|
||||||
Vector3 vel_j = Vector3(
|
|
||||||
vel_i + Rot_i_matrix * deltaVij_biascorrected
|
|
||||||
- 2 * omegaCoriolis.cross(vel_i) * deltaTij() // Coriolis term
|
|
||||||
+ gravity * deltaTij());
|
|
||||||
|
|
||||||
if (use2ndOrderCoriolis) {
|
|
||||||
pos_j += -0.5 * omegaCoriolis.cross(omegaCoriolis.cross(pos_i))
|
|
||||||
* deltaTij() * deltaTij(); // 2nd order coriolis term for position
|
|
||||||
vel_j += -omegaCoriolis.cross(omegaCoriolis.cross(pos_i)) * deltaTij(); // 2nd order term for velocity
|
|
||||||
}
|
|
||||||
|
|
||||||
const Rot3 deltaRij_biascorrected = biascorrectedDeltaRij(biasOmegaIncr);
|
|
||||||
// deltaRij_biascorrected = deltaRij * expmap(delRdelBiasOmega * biasOmegaIncr)
|
|
||||||
|
|
||||||
Vector3 biascorrectedOmega = Rot3::Logmap(deltaRij_biascorrected);
|
|
||||||
Vector3 correctedOmega = biascorrectedOmega
|
|
||||||
- Rot_i_matrix.transpose() * omegaCoriolis * deltaTij(); // Coriolis term
|
|
||||||
const Rot3 correctedDeltaRij = Rot3::Expmap(correctedOmega);
|
|
||||||
const Rot3 Rot_j = Rot_i.compose(correctedDeltaRij);
|
|
||||||
|
|
||||||
Pose3 pose_j = Pose3(Rot_j, Point3(pos_j));
|
|
||||||
return PoseVelocityBias(pose_j, vel_j, bias_i); // bias is predicted as a constant
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Compute errors w.r.t. preintegrated measurements and jacobians wrt pose_i, vel_i, bias_i, pose_j, bias_j
|
/// Compute errors w.r.t. preintegrated measurements and jacobians wrt pose_i, vel_i, bias_i, pose_j, bias_j
|
||||||
//------------------------------------------------------------------------------
|
Vector9 computeErrorAndJacobians(const Pose3& pose_i, const Vector3& vel_i, const Pose3& pose_j,
|
||||||
Vector9 computeErrorAndJacobians(const Pose3& pose_i, const Vector3& vel_i,
|
const Vector3& vel_j, const imuBias::ConstantBias& bias_i,
|
||||||
const Pose3& pose_j, const Vector3& vel_j,
|
const Vector3& gravity, const Vector3& omegaCoriolis,
|
||||||
const imuBias::ConstantBias& bias_i, const Vector3& gravity,
|
const bool use2ndOrderCoriolis, OptionalJacobian<9, 6> H1 =
|
||||||
const Vector3& omegaCoriolis, const bool use2ndOrderCoriolis,
|
boost::none,
|
||||||
OptionalJacobian<9, 6> H1 = boost::none, OptionalJacobian<9, 3> H2 =
|
OptionalJacobian<9, 3> H2 = boost::none,
|
||||||
boost::none, OptionalJacobian<9, 6> H3 = boost::none,
|
OptionalJacobian<9, 6> H3 = boost::none,
|
||||||
OptionalJacobian<9, 3> H4 = boost::none, OptionalJacobian<9, 6> H5 =
|
OptionalJacobian<9, 3> H4 = boost::none,
|
||||||
boost::none) const {
|
OptionalJacobian<9, 6> H5 = boost::none) const;
|
||||||
|
|
||||||
// We need the mismatch w.r.t. the biases used for preintegration
|
private:
|
||||||
const Vector3 biasOmegaIncr = bias_i.gyroscope() - biasHat().gyroscope();
|
|
||||||
|
|
||||||
// we give some shorter name to rotations and translations
|
|
||||||
const Rot3& Ri = pose_i.rotation();
|
|
||||||
const Rot3& Ri_transpose = Ri.transpose();
|
|
||||||
const Matrix& Ri_transpose_matrix = Ri_transpose.matrix();
|
|
||||||
|
|
||||||
const Rot3& Rj = pose_j.rotation();
|
|
||||||
|
|
||||||
const Vector3& pos_j = pose_j.translation().vector();
|
|
||||||
|
|
||||||
// Evaluate residual error, according to [3]
|
|
||||||
/* ---------------------------------------------------------------------------------------------------- */
|
|
||||||
Vector3 deltaPij_biascorrected, deltaVij_biascorrected;
|
|
||||||
PoseVelocityBias predictedState_j = predict(pose_i, vel_i, bias_i, gravity,
|
|
||||||
omegaCoriolis, use2ndOrderCoriolis, deltaPij_biascorrected,
|
|
||||||
deltaVij_biascorrected);
|
|
||||||
|
|
||||||
// Ri.transpose() is important here to preserve a model with *additive* Gaussian noise of correct covariance
|
|
||||||
const Vector3 fp = Ri_transpose_matrix
|
|
||||||
* (pos_j - predictedState_j.pose.translation().vector());
|
|
||||||
|
|
||||||
// Ri.transpose() is important here to preserve a model with *additive* Gaussian noise of correct covariance
|
|
||||||
const Vector3 fv = Ri_transpose_matrix * (vel_j - predictedState_j.velocity);
|
|
||||||
|
|
||||||
- // fR will be computed later. Note: it is the same as: fR = (predictedState_j.pose.translation()).between(Rot_j)
-
- /* ---------------------------------------------------------------------------------------------------- */
- // Get Get so<3> version of bias corrected rotation
- // If H5 is asked for, we will need the Jacobian, which we store in H5
- // H5 will then be corrected below to take into account the Coriolis effect
- Matrix3 D_cThetaRij_biasOmegaIncr;
- Vector3 biascorrectedOmega = biascorrectedThetaRij(biasOmegaIncr,
- H5 ? &D_cThetaRij_biasOmegaIncr : 0);
-
- // Coriolis term, note inconsistent with AHRS, where coriolisHat is *after* integration
- const Vector3 coriolis = integrateCoriolis(Ri, omegaCoriolis);
- Vector3 correctedOmega = biascorrectedOmega - coriolis;
-
- // Calculate Jacobians, matrices below needed only for some Jacobians...
- Vector3 fR;
- Rot3 correctedDeltaRij, fRrot;
- Matrix3 D_cDeltaRij_cOmega, D_coriolis, D_fR_fRrot,
- Ritranspose_omegaCoriolisHat;
-
- if (H1 || H2)
- Ritranspose_omegaCoriolisHat = Ri_transpose_matrix
- * skewSymmetric(omegaCoriolis);
-
- // This is done to save computation: we ask for the jacobians only when they are needed
- Rot3 RiBetweenRj = Ri_transpose*Rj;
- if (H1 || H2 || H3 || H4 || H5) {
- correctedDeltaRij = Rot3::Expmap(correctedOmega, D_cDeltaRij_cOmega);
- // Residual rotation error
- fRrot = correctedDeltaRij.between(RiBetweenRj);
- fR = Rot3::Logmap(fRrot, D_fR_fRrot);
- D_coriolis = -D_cDeltaRij_cOmega * skewSymmetric(coriolis);
- } else {
- correctedDeltaRij = Rot3::Expmap(correctedOmega);
- // Residual rotation error
- fRrot = correctedDeltaRij.between(RiBetweenRj);
- fR = Rot3::Logmap(fRrot);
- }
- if (H1) {
- H1->resize(9, 6);
- Matrix3 dfPdPi = -I_3x3;
- Matrix3 dfVdPi = Z_3x3;
- if (use2ndOrderCoriolis) {
- // this is the same as: Ri.transpose() * omegaCoriolisHat * omegaCoriolisHat * Ri.matrix()
- Matrix3 temp = Ritranspose_omegaCoriolisHat
- * (-Ritranspose_omegaCoriolisHat.transpose());
- dfPdPi += 0.5 * temp * deltaTij() * deltaTij();
- dfVdPi += temp * deltaTij();
- }
- (*H1) <<
- // dfP/dRi
- skewSymmetric(fp + deltaPij_biascorrected),
- // dfP/dPi
- dfPdPi,
- // dfV/dRi
- skewSymmetric(fv + deltaVij_biascorrected),
- // dfV/dPi
- dfVdPi,
- // dfR/dRi
- D_fR_fRrot
- * (-Rj.between(Ri).matrix() - fRrot.inverse().matrix() * D_coriolis),
- // dfR/dPi
- Z_3x3;
- }
- if (H2) {
- H2->resize(9, 3);
- (*H2) <<
- // dfP/dVi
- - Ri_transpose_matrix * deltaTij()
- + Ritranspose_omegaCoriolisHat * deltaTij() * deltaTij(), // Coriolis term - we got rid of the 2 wrt ins paper
- // dfV/dVi
- - Ri_transpose_matrix + 2 * Ritranspose_omegaCoriolisHat * deltaTij(), // Coriolis term
- // dfR/dVi
- Z_3x3;
- }
- if (H3) {
- H3->resize(9, 6);
- (*H3) <<
- // dfP/dPosej
- Z_3x3, Ri_transpose_matrix * Rj.matrix(),
- // dfV/dPosej
- Matrix::Zero(3, 6),
- // dfR/dPosej
- D_fR_fRrot, Z_3x3;
- }
- if (H4) {
- H4->resize(9, 3);
- (*H4) <<
- // dfP/dVj
- Z_3x3,
- // dfV/dVj
- Ri_transpose_matrix,
- // dfR/dVj
- Z_3x3;
- }
- if (H5) {
- // H5 by this point already contains 3*3 biascorrectedThetaRij derivative
- const Matrix3 JbiasOmega = D_cDeltaRij_cOmega * D_cThetaRij_biasOmegaIncr;
- H5->resize(9, 6);
- (*H5) <<
- // dfP/dBias
- -delPdelBiasAcc(), -delPdelBiasOmega(),
- // dfV/dBias
- -delVdelBiasAcc(), -delVdelBiasOmega(),
- // dfR/dBias
- Z_3x3, D_fR_fRrot * (-fRrot.inverse().matrix() * JbiasOmega);
- }
- Vector9 r;
- r << fp, fv, fR;
- return r;
- }
-
-private:
 /** Serialization function */
 friend class boost::serialization::access;
 template<class ARCHIVE>

@@ -456,26 +174,22 @@ private:

 class ImuBase {

 protected:

 Vector3 gravity_;
 Vector3 omegaCoriolis_;
 boost::optional<Pose3> body_P_sensor_; ///< The pose of the sensor in the body frame
 bool use2ndOrderCoriolis_; ///< Controls whether higher order terms are included when calculating the Coriolis Effect

 public:

-ImuBase() :
-gravity_(Vector3(0.0, 0.0, 9.81)), omegaCoriolis_(Vector3(0.0, 0.0, 0.0)), body_P_sensor_(
-boost::none), use2ndOrderCoriolis_(false) {
-}
+/// Default constructor, with decent gravity and zero coriolis
+ImuBase();

+/// Fully fledge constructor
 ImuBase(const Vector3& gravity, const Vector3& omegaCoriolis,
 boost::optional<const Pose3&> body_P_sensor = boost::none,
-const bool use2ndOrderCoriolis = false) :
-gravity_(gravity), omegaCoriolis_(omegaCoriolis), body_P_sensor_(
-body_P_sensor), use2ndOrderCoriolis_(use2ndOrderCoriolis) {
-}
+const bool use2ndOrderCoriolis = false);

 const Vector3& gravity() const {
 return gravity_;

@@ -486,4 +200,4 @@ public:

 };

 } /// namespace gtsam
@@ -17,6 +17,7 @@

 #include <gtsam/navigation/ImuFactor.h>
 #include <gtsam/nonlinear/Values.h>
+#include <gtsam/nonlinear/factorTesting.h>
 #include <gtsam/inference/Symbol.h>
 #include <gtsam/navigation/ImuBias.h>
 #include <gtsam/geometry/Pose3.h>

@@ -81,19 +82,27 @@ Rot3 updatePreintegratedRot(const Rot3& deltaRij_old,
 return deltaRij_new;
 }

-// Auxiliary functions to test preintegrated Jacobians
-// delPdelBiasAcc_ delPdelBiasOmega_ delVdelBiasAcc_ delVdelBiasOmega_ delRdelBiasOmega_
+// Define covariance matrices
 /* ************************************************************************* */
 double accNoiseVar = 0.01;
 double omegaNoiseVar = 0.03;
 double intNoiseVar = 0.0001;
+const Matrix3 kMeasuredAccCovariance = accNoiseVar * Matrix3::Identity();
+const Matrix3 kMeasuredOmegaCovariance = omegaNoiseVar * Matrix3::Identity();
+const Matrix3 kIntegrationErrorCovariance = intNoiseVar * Matrix3::Identity();
+
+// Auxiliary functions to test preintegrated Jacobians
+// delPdelBiasAcc_ delPdelBiasOmega_ delVdelBiasAcc_ delVdelBiasOmega_ delRdelBiasOmega_
+/* ************************************************************************* */
 ImuFactor::PreintegratedMeasurements evaluatePreintegratedMeasurements(
 const imuBias::ConstantBias& bias, const list<Vector3>& measuredAccs,
 const list<Vector3>& measuredOmegas, const list<double>& deltaTs,
 const bool use2ndOrderIntegration = false) {
 ImuFactor::PreintegratedMeasurements result(bias,
-accNoiseVar * Matrix3::Identity(), omegaNoiseVar * Matrix3::Identity(),
-intNoiseVar * Matrix3::Identity(), use2ndOrderIntegration);
+kMeasuredAccCovariance,
+kMeasuredOmegaCovariance,
+kIntegrationErrorCovariance,
+use2ndOrderIntegration);

 list<Vector3>::const_iterator itAcc = measuredAccs.begin();
 list<Vector3>::const_iterator itOmega = measuredOmegas.begin();
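Usage note (not part of the commit): the tests in this file all follow the same five-argument construction pattern for PreintegratedMeasurements shown in the hunk above. A minimal sketch, assuming the named covariance constants defined above; the bias and measurement numbers are made up:

    // illustrative values only
    imuBias::ConstantBias bias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.1));
    ImuFactor::PreintegratedMeasurements pim(bias, kMeasuredAccCovariance,
        kMeasuredOmegaCovariance, kIntegrationErrorCovariance, true);  // true = 2nd-order integration
    pim.integrateMeasurement(Vector3(0.0, 0.0, 9.81), Vector3(0.0, 0.0, 0.1), 0.01);  // acc, omega, dt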
@@ -156,8 +165,11 @@ TEST( ImuFactor, PreintegratedMeasurements ) {

 bool use2ndOrderIntegration = true;
 // Actual preintegrated values
-ImuFactor::PreintegratedMeasurements actual1(bias, Matrix3::Zero(),
-Matrix3::Zero(), Matrix3::Zero(), use2ndOrderIntegration);
+ImuFactor::PreintegratedMeasurements actual1(bias,
+kMeasuredAccCovariance,
+kMeasuredOmegaCovariance,
+kIntegrationErrorCovariance,
+use2ndOrderIntegration);
 actual1.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

 EXPECT(

@@ -208,8 +220,11 @@ TEST( ImuFactor, ErrorAndJacobians ) {
 Vector3 measuredAcc = x1.rotation().unrotate(-Point3(gravity)).vector();
 double deltaT = 1.0;
 bool use2ndOrderIntegration = true;
-ImuFactor::PreintegratedMeasurements pre_int_data(bias, Matrix3::Zero(),
-Matrix3::Zero(), Matrix3::Zero(), use2ndOrderIntegration);
+ImuFactor::PreintegratedMeasurements pre_int_data(bias,
+kMeasuredAccCovariance,
+kMeasuredOmegaCovariance,
+kIntegrationErrorCovariance,
+use2ndOrderIntegration);
 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

 // Create factor

@@ -260,6 +275,40 @@ TEST( ImuFactor, ErrorAndJacobians ) {
 Matrix H5e = numericalDerivative11<Vector, imuBias::ConstantBias>(
 boost::bind(&callEvaluateError, factor, x1, v1, x2, v2, _1), bias);
 EXPECT(assert_equal(H5e, H5a));

+////////////////////////////////////////////////////////////////////////////
+// Evaluate error with wrong values
+Vector3 v2_wrong = v2 + Vector3(0.1, 0.1, 0.1);
+errorActual = factor.evaluateError(x1, v1, x2, v2_wrong, bias);
+errorExpected << 0, 0, 0, 0.0724744871, 0.040715657, 0.151952901, 0, 0, 0;
+EXPECT(assert_equal(errorExpected, errorActual, 1e-6));
+
+Values values;
+values.insert(X(1), x1);
+values.insert(V(1), v1);
+values.insert(X(2), x2);
+values.insert(V(2), v2_wrong);
+values.insert(B(1), bias);
+errorExpected << 0, 0, 0, 0.0724744871, 0.040715657, 0.151952901, 0, 0, 0;
+EXPECT(assert_equal(factor.unwhitenedError(values), errorActual, 1e-6));
+
+// Make sure the whitening is done correctly
+Matrix cov = pre_int_data.preintMeasCov();
+Matrix R = RtR(cov.inverse());
+Vector whitened = R * errorActual;
+EXPECT(assert_equal(0.5 * whitened.squaredNorm(), factor.error(values), 1e-6));
+
+///////////////////////////////////////////////////////////////////////////////
+// Make sure linearization is correct
+// Create expected value by numerical differentiation
+JacobianFactor expected = linearizeNumerically(factor, values, 1e-8);
+
+// Create actual value by linearize
+GaussianFactor::shared_ptr linearized = factor.linearize(values);
+JacobianFactor* actual = dynamic_cast<JacobianFactor*>(linearized.get());
+
+// Check cast result and then equality
+EXPECT(assert_equal(expected, *actual, 1e-4));
 }

 /* ************************************************************************* */

@@ -284,8 +333,8 @@ TEST( ImuFactor, ErrorAndJacobianWithBiases ) {
 double deltaT = 1.0;

 ImuFactor::PreintegratedMeasurements pre_int_data(
-imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.1)),
-Matrix3::Zero(), Matrix3::Zero(), Matrix3::Zero());
+imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.1)), kMeasuredAccCovariance,
+kMeasuredOmegaCovariance, kIntegrationErrorCovariance);
 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

 // Create factor

@@ -354,8 +403,8 @@ TEST( ImuFactor, ErrorAndJacobianWith2ndOrderCoriolis ) {
 double deltaT = 1.0;

 ImuFactor::PreintegratedMeasurements pre_int_data(
-imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.1)),
-Matrix3::Zero(), Matrix3::Zero(), Matrix3::Zero());
+imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.1)), kMeasuredAccCovariance,
+kMeasuredOmegaCovariance, kIntegrationErrorCovariance);
 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

 // Create factor

@@ -874,8 +923,8 @@ TEST( ImuFactor, ErrorWithBiasesAndSensorBodyDisplacement ) {
 Point3(1, 0, 0));

 ImuFactor::PreintegratedMeasurements pre_int_data(
-imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)),
-Matrix3::Zero(), Matrix3::Zero(), Matrix3::Zero());
+imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)), kMeasuredAccCovariance,
+kMeasuredOmegaCovariance, kIntegrationErrorCovariance);

 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

@@ -933,8 +982,8 @@ TEST(ImuFactor, PredictPositionAndVelocity) {
 I6x6 = Matrix::Identity(6, 6);

 ImuFactor::PreintegratedMeasurements pre_int_data(
-imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)),
-Matrix3::Zero(), Matrix3::Zero(), Matrix3::Zero(), true);
+imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)), kMeasuredAccCovariance,
+kMeasuredOmegaCovariance, kIntegrationErrorCovariance, true);

 for (int i = 0; i < 1000; ++i)
 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);

@@ -974,8 +1023,8 @@ TEST(ImuFactor, PredictRotation) {
 I6x6 = Matrix::Identity(6, 6);

 ImuFactor::PreintegratedMeasurements pre_int_data(
-imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)),
-Matrix3::Zero(), Matrix3::Zero(), Matrix3::Zero(), true);
+imuBias::ConstantBias(Vector3(0.2, 0.0, 0.0), Vector3(0.0, 0.0, 0.0)), kMeasuredAccCovariance,
+kMeasuredOmegaCovariance, kIntegrationErrorCovariance, true);

 for (int i = 0; i < 1000; ++i)
 pre_int_data.integrateMeasurement(measuredAcc, measuredOmega, deltaT);
@@ -32,7 +32,9 @@ namespace gtsam {
 template<class T>
 class ExpressionFactor: public NoiseModelFactor {

 protected:

+typedef ExpressionFactor<T> This;
+
 T measurement_; ///< the measurement to be compared with the expression
 Expression<T> expression_; ///< the expression that is AD enabled

@@ -40,7 +42,9 @@ protected:

 static const int Dim = traits<T>::dimension;

 public:

+typedef boost::shared_ptr<ExpressionFactor<T> > shared_ptr;
+
 /// Constructor
 ExpressionFactor(const SharedNoiseModel& noiseModel, //

@@ -106,6 +110,11 @@ public:

 return factor;
 }

+/// @return a deep copy of this factor
+virtual gtsam::NonlinearFactor::shared_ptr clone() const {
+return boost::static_pointer_cast<gtsam::NonlinearFactor>(
+gtsam::NonlinearFactor::shared_ptr(new This(*this))); }
 };
 // ExpressionFactor
@@ -98,7 +98,7 @@ DoglegOptimizerImpl::TrustRegionAdaptationMode ISAM2DoglegParams::adaptationMode
 }

 /* ************************************************************************* */
-ISAM2Params::Factorization ISAM2Params::factorizationTranslator(const std::string& str) const {
+ISAM2Params::Factorization ISAM2Params::factorizationTranslator(const std::string& str) {
 std::string s = str; boost::algorithm::to_upper(s);
 if (s == "QR") return ISAM2Params::QR;
 if (s == "CHOLESKY") return ISAM2Params::CHOLESKY;

@@ -108,7 +108,7 @@ ISAM2Params::Factorization ISAM2Params::factorizationTranslator(const std::strin
 }

 /* ************************************************************************* */
-std::string ISAM2Params::factorizationTranslator(const ISAM2Params::Factorization& value) const {
+std::string ISAM2Params::factorizationTranslator(const ISAM2Params::Factorization& value) {
 std::string s;
 switch (value) {
 case ISAM2Params::QR: s = "QR"; break;
@@ -186,6 +186,7 @@ struct GTSAM_EXPORT ISAM2Params {
 enableDetailedResults(false), enablePartialRelinearizationCheck(false),
 findUnusedFactorSlots(false) {}

+/// print iSAM2 parameters
 void print(const std::string& str = "") const {
 std::cout << str << "\n";
 if(optimizationParams.type() == typeid(ISAM2GaussNewtonParams))

@@ -214,7 +215,9 @@ struct GTSAM_EXPORT ISAM2Params {
 std::cout.flush();
 }

-/** Getters and Setters for all properties */
+/// @name Getters and Setters for all properties
+/// @{

 OptimizationParams getOptimizationParams() const { return this->optimizationParams; }
 RelinearizationThreshold getRelinearizeThreshold() const { return relinearizeThreshold; }
 int getRelinearizeSkip() const { return relinearizeSkip; }

@@ -237,14 +240,21 @@ struct GTSAM_EXPORT ISAM2Params {
 void setEnableDetailedResults(bool enableDetailedResults) { this->enableDetailedResults = enableDetailedResults; }
 void setEnablePartialRelinearizationCheck(bool enablePartialRelinearizationCheck) { this->enablePartialRelinearizationCheck = enablePartialRelinearizationCheck; }

-Factorization factorizationTranslator(const std::string& str) const;
-std::string factorizationTranslator(const Factorization& value) const;

 GaussianFactorGraph::Eliminate getEliminationFunction() const {
 return factorization == CHOLESKY
 ? (GaussianFactorGraph::Eliminate)EliminatePreferCholesky
 : (GaussianFactorGraph::Eliminate)EliminateQR;
 }

+/// @}
+
+/// @name Some utilities
+/// @{
+
+static Factorization factorizationTranslator(const std::string& str);
+static std::string factorizationTranslator(const Factorization& value);
+
+/// @}
 };
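Usage note (not part of the commit): with the translators declared static in the hunk above, they can be called without an ISAM2Params instance. A minimal sketch:

    ISAM2Params params;
    params.factorization = ISAM2Params::factorizationTranslator("QR");             // string -> enum
    std::string name = ISAM2Params::factorizationTranslator(params.factorization); // enum -> string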
@@ -544,8 +554,15 @@ public:
 boost::optional<std::vector<size_t>&> marginalFactorsIndices = boost::none,
 boost::optional<std::vector<size_t>&> deletedFactorsIndices = boost::none);

-/** Access the current linearization point */
-const Values& getLinearizationPoint() const { return theta_; }
+/// Access the current linearization point
+const Values& getLinearizationPoint() const {
+return theta_;
+}
+
+/// Check whether variable with given key exists in linearization point
+bool valueExists(Key key) const {
+return theta_.exists(key);
+}

 /** Compute an estimate from the incomplete linear delta computed during the last update.
 * This delta is incomplete because it was not updated below wildfire_threshold. If only
@@ -21,21 +21,14 @@
 #include <gtsam/base/Testable.h>
 #include <gtsam/base/Manifold.h>

+#include <boost/bind.hpp>
+
 #include <limits>
 #include <iostream>
 #include <cmath>

 namespace gtsam {

-/**
- * Template default compare function that assumes a testable T
- */
-template<class T>
-bool compare(const T& a, const T& b) {
-GTSAM_CONCEPT_TESTABLE_TYPE(T);
-return a.equals(b);
-}
-
 /**
 * An equality factor that forces either one variable to a constant,
 * or a set of variables to be equal to each other.

@@ -76,7 +69,9 @@ public:
 /**
 * Function that compares two values
 */
-bool (*compare_)(const T& a, const T& b);
+typedef boost::function<bool(const T&, const T&)> CompareFunction;
+CompareFunction compare_;
+// bool (*compare_)(const T& a, const T& b);

 /** default constructor - only for serialization */
 NonlinearEquality() {

@@ -92,7 +87,7 @@ public:
 * Constructor - forces exact evaluation
 */
 NonlinearEquality(Key j, const T& feasible,
-bool (*_compare)(const T&, const T&) = compare<T>) :
+const CompareFunction &_compare = boost::bind(traits<T>::Equals,_1,_2,1e-9)) :
 Base(noiseModel::Constrained::All(traits<T>::GetDimension(feasible)),
 j), feasible_(feasible), allow_error_(false), error_gain_(0.0), //
 compare_(_compare) {

@@ -102,7 +97,7 @@ public:
 * Constructor - allows inexact evaluation
 */
 NonlinearEquality(Key j, const T& feasible, double error_gain,
-bool (*_compare)(const T&, const T&) = compare<T>) :
+const CompareFunction &_compare = boost::bind(traits<T>::Equals,_1,_2,1e-9)) :
 Base(noiseModel::Constrained::All(traits<T>::GetDimension(feasible)),
 j), feasible_(feasible), allow_error_(true), error_gain_(error_gain), //
 compare_(_compare) {

@@ -122,7 +117,7 @@ public:
 /** Check if two factors are equal */
 virtual bool equals(const NonlinearFactor& f, double tol = 1e-9) const {
 const This* e = dynamic_cast<const This*>(&f);
-return e && Base::equals(f) && feasible_.equals(e->feasible_, tol)
+return e && Base::equals(f) && traits<T>::Equals(feasible_,e->feasible_, tol)
 && std::abs(error_gain_ - e->error_gain_) < tol;
 }
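Usage note (not part of the commit): because compare_ is now a boost::function, any callable with the right signature can be supplied; the default simply binds traits<T>::Equals with tolerance 1e-9. A minimal sketch with a made-up key and a looser tolerance:

    // Constrain pose x1 to the identity, treating poses within 1e-6 as equal
    NonlinearEquality<Pose3> eq(Symbol('x', 1), Pose3(),
        boost::bind(traits<Pose3>::Equals, _1, _2, 1e-6));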
@@ -47,11 +47,12 @@ double NonlinearFactorGraph::probPrime(const Values& c) const {

 /* ************************************************************************* */
 void NonlinearFactorGraph::print(const std::string& str, const KeyFormatter& keyFormatter) const {
-cout << str << "size: " << size() << endl;
+cout << str << "size: " << size() << endl << endl;
 for (size_t i = 0; i < factors_.size(); i++) {
 stringstream ss;
-ss << "factor " << i << ": ";
+ss << "Factor " << i << ": ";
 if (factors_[i] != NULL) factors_[i]->print(ss.str(), keyFormatter);
+cout << endl;
 }
 }

@@ -62,12 +63,12 @@ bool NonlinearFactorGraph::equals(const NonlinearFactorGraph& other, double tol)

 /* ************************************************************************* */
 void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
-const GraphvizFormatting& graphvizFormatting,
+const GraphvizFormatting& formatting,
 const KeyFormatter& keyFormatter) const
 {
 stm << "graph {\n";
-stm << " size=\"" << graphvizFormatting.figureWidthInches << "," <<
-graphvizFormatting.figureHeightInches << "\";\n\n";
+stm << " size=\"" << formatting.figureWidthInches << "," <<
+formatting.figureHeightInches << "\";\n\n";

 FastSet<Key> keys = this->keys();

@@ -75,72 +76,38 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
 struct { boost::optional<Point2> operator()(
 const Value& value, const GraphvizFormatting& graphvizFormatting)
 {
-if(const Pose2* p = dynamic_cast<const Pose2*>(&value)) {
-double x, y;
-switch (graphvizFormatting.paperHorizontalAxis) {
-case GraphvizFormatting::X: x = p->x(); break;
-case GraphvizFormatting::Y: x = p->y(); break;
-case GraphvizFormatting::Z: x = 0.0; break;
-case GraphvizFormatting::NEGX: x = -p->x(); break;
-case GraphvizFormatting::NEGY: x = -p->y(); break;
-case GraphvizFormatting::NEGZ: x = 0.0; break;
-default: throw std::runtime_error("Invalid enum value");
-}
-switch (graphvizFormatting.paperVerticalAxis) {
-case GraphvizFormatting::X: y = p->x(); break;
-case GraphvizFormatting::Y: y = p->y(); break;
-case GraphvizFormatting::Z: y = 0.0; break;
-case GraphvizFormatting::NEGX: y = -p->x(); break;
-case GraphvizFormatting::NEGY: y = -p->y(); break;
-case GraphvizFormatting::NEGZ: y = 0.0; break;
-default: throw std::runtime_error("Invalid enum value");
-}
-return Point2(x,y);
-} else if(const Pose3* p = dynamic_cast<const Pose3*>(&value)) {
-double x, y;
-switch (graphvizFormatting.paperHorizontalAxis) {
-case GraphvizFormatting::X: x = p->x(); break;
-case GraphvizFormatting::Y: x = p->y(); break;
-case GraphvizFormatting::Z: x = p->z(); break;
-case GraphvizFormatting::NEGX: x = -p->x(); break;
-case GraphvizFormatting::NEGY: x = -p->y(); break;
-case GraphvizFormatting::NEGZ: x = -p->z(); break;
-default: throw std::runtime_error("Invalid enum value");
-}
-switch (graphvizFormatting.paperVerticalAxis) {
-case GraphvizFormatting::X: y = p->x(); break;
-case GraphvizFormatting::Y: y = p->y(); break;
-case GraphvizFormatting::Z: y = p->z(); break;
-case GraphvizFormatting::NEGX: y = -p->x(); break;
-case GraphvizFormatting::NEGY: y = -p->y(); break;
-case GraphvizFormatting::NEGZ: y = -p->z(); break;
-default: throw std::runtime_error("Invalid enum value");
-}
-return Point2(x,y);
-} else if(const Point3* p = dynamic_cast<const Point3*>(&value)) {
-double x, y;
-switch (graphvizFormatting.paperHorizontalAxis) {
-case GraphvizFormatting::X: x = p->x(); break;
-case GraphvizFormatting::Y: x = p->y(); break;
-case GraphvizFormatting::Z: x = p->z(); break;
-case GraphvizFormatting::NEGX: x = -p->x(); break;
-case GraphvizFormatting::NEGY: x = -p->y(); break;
-case GraphvizFormatting::NEGZ: x = -p->z(); break;
-default: throw std::runtime_error("Invalid enum value");
-}
-switch (graphvizFormatting.paperVerticalAxis) {
-case GraphvizFormatting::X: y = p->x(); break;
-case GraphvizFormatting::Y: y = p->y(); break;
-case GraphvizFormatting::Z: y = p->z(); break;
-case GraphvizFormatting::NEGX: y = -p->x(); break;
-case GraphvizFormatting::NEGY: y = -p->y(); break;
-case GraphvizFormatting::NEGZ: y = -p->z(); break;
-default: throw std::runtime_error("Invalid enum value");
-}
-return Point2(x,y);
+Vector3 t;
+if (const GenericValue<Pose2>* p = dynamic_cast<const GenericValue<Pose2>*>(&value)) {
+t << p->value().x(), p->value().y(), 0;
+} else if (const GenericValue<Point2>* p = dynamic_cast<const GenericValue<Point2>*>(&value)) {
+t << p->value().x(), p->value().y(), 0;
+} else if (const GenericValue<Pose3>* p = dynamic_cast<const GenericValue<Pose3>*>(&value)) {
+t = p->value().translation().vector();
+} else if (const GenericValue<Point3>* p = dynamic_cast<const GenericValue<Point3>*>(&value)) {
+t = p->value().vector();
 } else {
 return boost::none;
 }
+double x, y;
+switch (graphvizFormatting.paperHorizontalAxis) {
+case GraphvizFormatting::X: x = t.x(); break;
+case GraphvizFormatting::Y: x = t.y(); break;
+case GraphvizFormatting::Z: x = t.z(); break;
+case GraphvizFormatting::NEGX: x = -t.x(); break;
+case GraphvizFormatting::NEGY: x = -t.y(); break;
+case GraphvizFormatting::NEGZ: x = -t.z(); break;
+default: throw std::runtime_error("Invalid enum value");
+}
+switch (graphvizFormatting.paperVerticalAxis) {
+case GraphvizFormatting::X: y = t.x(); break;
+case GraphvizFormatting::Y: y = t.y(); break;
+case GraphvizFormatting::Z: y = t.z(); break;
+case GraphvizFormatting::NEGX: y = -t.x(); break;
+case GraphvizFormatting::NEGY: y = -t.y(); break;
+case GraphvizFormatting::NEGZ: y = -t.z(); break;
+default: throw std::runtime_error("Invalid enum value");
+}
+return Point2(x,y);
 }} getXY;

 // Find bounds

@@ -148,7 +115,7 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
 double minY = numeric_limits<double>::infinity(), maxY = -numeric_limits<double>::infinity();
 BOOST_FOREACH(Key key, keys) {
 if(values.exists(key)) {
-boost::optional<Point2> xy = getXY(values.at(key), graphvizFormatting);
+boost::optional<Point2> xy = getXY(values.at(key), formatting);
 if(xy) {
 if(xy->x() < minX)
 minX = xy->x();

@@ -163,33 +130,22 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,
 }

 // Create nodes for each variable in the graph
-bool firstTimePoses = true;
-Key lastKey;
-BOOST_FOREACH(Key key, keys) {
+BOOST_FOREACH(Key key, keys){
 // Label the node with the label from the KeyFormatter
 stm << " var" << key << "[label=\"" << keyFormatter(key) << "\"";
 if(values.exists(key)) {
-boost::optional<Point2> xy = getXY(values.at(key), graphvizFormatting);
+boost::optional<Point2> xy = getXY(values.at(key), formatting);
 if(xy)
-stm << ", pos=\"" << graphvizFormatting.scale*(xy->x() - minX) << "," << graphvizFormatting.scale*(xy->y() - minY) << "!\"";
+stm << ", pos=\"" << formatting.scale*(xy->x() - minX) << "," << formatting.scale*(xy->y() - minY) << "!\"";
 }
 stm << "];\n";
-
-if (firstTimePoses) {
-lastKey = key;
-firstTimePoses = false;
-} else {
-stm << " var" << key << "--" << "var" << lastKey << ";\n";
-lastKey = key;
-}
 }
 stm << "\n";

-if(graphvizFormatting.mergeSimilarFactors) {
+if (formatting.mergeSimilarFactors) {
 // Remove duplicate factors
 FastSet<vector<Key> > structure;
-BOOST_FOREACH(const sharedFactor& factor, *this) {
+BOOST_FOREACH(const sharedFactor& factor, *this){
 if(factor) {
 vector<Key> factorKeys = factor->keys();
 std::sort(factorKeys.begin(), factorKeys.end());

@@ -199,57 +155,65 @@ void NonlinearFactorGraph::saveGraph(std::ostream &stm, const Values& values,

 // Create factors and variable connections
 size_t i = 0;
-BOOST_FOREACH(const vector<Key>& factorKeys, structure) {
+BOOST_FOREACH(const vector<Key>& factorKeys, structure){
 // Make each factor a dot
 stm << " factor" << i << "[label=\"\", shape=point";
 {
-map<size_t, Point2>::const_iterator pos = graphvizFormatting.factorPositions.find(i);
-if(pos != graphvizFormatting.factorPositions.end())
-stm << ", pos=\"" << graphvizFormatting.scale*(pos->second.x() - minX) << "," << graphvizFormatting.scale*(pos->second.y() - minY) << "!\"";
+map<size_t, Point2>::const_iterator pos = formatting.factorPositions.find(i);
+if(pos != formatting.factorPositions.end())
+stm << ", pos=\"" << formatting.scale*(pos->second.x() - minX) << ","
+<< formatting.scale*(pos->second.y() - minY) << "!\"";
 }
 stm << "];\n";

 // Make factor-variable connections
 BOOST_FOREACH(Key key, factorKeys) {
-stm << " var" << key << "--" << "factor" << i << ";\n"; }
+stm << " var" << key << "--" << "factor" << i << ";\n";
+}

 ++ i;
 }
 } else {
 // Create factors and variable connections
 for(size_t i = 0; i < this->size(); ++i) {
-if(graphvizFormatting.plotFactorPoints){
-// Make each factor a dot
-stm << " factor" << i << "[label=\"\", shape=point";
-{
-map<size_t, Point2>::const_iterator pos = graphvizFormatting.factorPositions.find(i);
-if(pos != graphvizFormatting.factorPositions.end())
-stm << ", pos=\"" << graphvizFormatting.scale*(pos->second.x() - minX) << "," << graphvizFormatting.scale*(pos->second.y() - minY) << "!\"";
-}
-stm << "];\n";
+const NonlinearFactor::shared_ptr& factor = this->at(i);
+if(formatting.plotFactorPoints) {
+const FastVector<Key>& keys = factor->keys();
+if (formatting.binaryEdges && keys.size()==2) {
+stm << " var" << keys[0] << "--" << "var" << keys[1] << ";\n";
+} else {
+// Make each factor a dot
+stm << " factor" << i << "[label=\"\", shape=point";
+{
+map<size_t, Point2>::const_iterator pos = formatting.factorPositions.find(i);
+if(pos != formatting.factorPositions.end())
+stm << ", pos=\"" << formatting.scale*(pos->second.x() - minX) << ","
+<< formatting.scale*(pos->second.y() - minY) << "!\"";
+}
+stm << "];\n";

 // Make factor-variable connections
-if(graphvizFormatting.connectKeysToFactor && this->at(i)) {
-BOOST_FOREACH(Key key, *this->at(i)) {
+if(formatting.connectKeysToFactor && factor) {
+BOOST_FOREACH(Key key, *factor) {
 stm << " var" << key << "--" << "factor" << i << ";\n";
+}
+}
+}
 }
-}
-
-}
 else {
-if(this->at(i)) {
+if(factor) {
 Key k;
 bool firstTime = true;
 BOOST_FOREACH(Key key, *this->at(i)) {
-if(firstTime){
+if(firstTime) {
 k = key;
 firstTime = false;
 continue;
+}
+stm << " var" << key << "--" << "var" << k << ";\n";
+k = key;
+}
 }
-stm << " var" << key << "--" << "var" << k << ";\n";
-k = key;
-}
-}
 }
 }
 }
@@ -32,6 +32,10 @@ namespace gtsam {
 class Ordering;
 class GaussianFactorGraph;
 class SymbolicFactorGraph;
+template<typename T>
+class Expression;
+template<typename T>
+class ExpressionFactor;

 /**
 * Formatting options when saving in GraphViz format using

@@ -47,6 +51,7 @@ namespace gtsam {
 bool mergeSimilarFactors; ///< Merge multiple factors that have the same connectivity
 bool plotFactorPoints; ///< Plots each factor as a dot between the variables
 bool connectKeysToFactor; ///< Draw a line from each key within a factor to the dot of the factor
+bool binaryEdges; ///< just use non-dotted edges for binary factors
 std::map<size_t, Point2> factorPositions; ///< (optional for each factor) Manually specify factor "dot" positions.
 /// Default constructor sets up robot coordinates. Paper horizontal is robot Y,
 /// paper vertical is robot X. Default figure size of 5x5 in.

@@ -54,7 +59,7 @@ namespace gtsam {
 paperHorizontalAxis(Y), paperVerticalAxis(X),
 figureWidthInches(5), figureHeightInches(5), scale(1),
 mergeSimilarFactors(false), plotFactorPoints(true),
-connectKeysToFactor(true) {}
+connectKeysToFactor(true), binaryEdges(true) {}
 };
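Usage note (not part of the commit): the new binaryEdges flag (enabled by default) lets saveGraph connect the two variables of a binary factor with a plain edge instead of an intermediate factor dot. A minimal sketch, assuming a graph and values already exist and that the remaining saveGraph arguments keep their header defaults; the file name is a placeholder:

    GraphvizFormatting formatting;
    formatting.binaryEdges = false;           // keep the dot-per-factor style even for binary factors
    std::ofstream os("graph.dot");            // placeholder output file
    graph.saveGraph(os, values, formatting);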
@@ -150,6 +155,18 @@ namespace gtsam {
 */
 NonlinearFactorGraph rekey(const std::map<Key,Key>& rekey_mapping) const;

+/**
+ * Directly add ExpressionFactor that implements |h(x)-z|^2_R
+ * @param h expression that implements measurement function
+ * @param z measurement
+ * @param R model
+ */
+template<typename T>
+void addExpressionFactor(const SharedNoiseModel& R, const T& z,
+const Expression<T>& h) {
+push_back(boost::make_shared<ExpressionFactor<T> >(R, z, h));
+}
+
 private:

 /** Serialization function */
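Usage note (not part of the commit): addExpressionFactor is just sugar for push_back of an ExpressionFactor, as the body above shows. A minimal sketch with placeholder key, noise model and measurement (the push_back test added to testExpressionFactor.cpp below exercises the same call):

    Expression<Point2> p(Symbol('p', 1));                    // leaf expression for an unknown Point2
    SharedNoiseModel model = noiseModel::Isotropic::Sigma(2, 0.1);
    NonlinearFactorGraph graph;
    graph.addExpressionFactor(model, Point2(1.0, 2.0), p);   // adds |h(x)-z|^2_R with h the identity on p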
@@ -21,9 +21,9 @@

 #include <gtsam/nonlinear/ExpressionFactor.h>
 #include <gtsam/nonlinear/Expression.h>
+#include <gtsam/nonlinear/factorTesting.h>
 #include <gtsam/base/Matrix.h>
 #include <gtsam/base/Testable.h>
-#include <gtsam/base/numericalDerivative.h>
 #include <gtsam/linear/VectorValues.h>

 #include <CppUnitLite/TestResult.h>

@@ -32,47 +32,6 @@

 namespace gtsam {

-/**
- * Linearize a nonlinear factor using numerical differentiation
- * The benefit of this method is that it does not need to know what types are
- * involved to evaluate the factor. If all the machinery of gtsam is working
- * correctly, we should get the correct numerical derivatives out the other side.
- */
-JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
-const Values& values, double delta = 1e-5) {
-
-// We will fill a map of Jacobians
-std::map<Key, Matrix> jacobians;
-
-// Get size
-Eigen::VectorXd e = factor.whitenedError(values);
-const size_t rows = e.size();
-
-// Loop over all variables
-VectorValues dX = values.zeroVectors();
-BOOST_FOREACH(Key key, factor) {
-// Compute central differences using the values struct.
-const size_t cols = dX.dim(key);
-Matrix J = Matrix::Zero(rows, cols);
-for (size_t col = 0; col < cols; ++col) {
-Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols);
-dx[col] = delta;
-dX[key] = dx;
-Values eval_values = values.retract(dX);
-Eigen::VectorXd left = factor.whitenedError(eval_values);
-dx[col] = -delta;
-dX[key] = dx;
-eval_values = values.retract(dX);
-Eigen::VectorXd right = factor.whitenedError(eval_values);
-J.col(col) = (left - right) * (1.0 / (2.0 * delta));
-}
-jacobians[key] = J;
-}
-
-// Next step...return JacobianFactor
-return JacobianFactor(jacobians, -e);
-}
-
 namespace internal {
 // CPPUnitLite-style test for linearization of a factor
 void testFactorJacobians(TestResult& result_, const std::string& name_,
@@ -18,6 +18,12 @@ Expression<T> between(const Expression<T>& t1, const Expression<T>& t2) {
 return Expression<T>(traits<T>::Between, t1, t2);
 }

+// Generics
+template<typename T>
+Expression<T> compose(const Expression<T>& t1, const Expression<T>& t2) {
+return Expression<T>(traits<T>::Compose, t1, t2);
+}
+
 typedef Expression<double> double_;
 typedef Expression<Vector3> Vector3_;
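Usage note (not part of the commit): compose mirrors the existing between generic and forwards to traits<T>::Compose. A minimal sketch with made-up keys, assuming the leaf-expression constructor that takes a Key:

    Expression<Rot3> R1(Symbol('R', 1)), R2(Symbol('R', 2));
    Expression<Rot3> R1R2 = compose(R1, R2);   // expression whose value is R1 * R2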
@@ -0,0 +1,68 @@
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file factorTesting.h
+ * @date September 18, 2014
+ * @author Frank Dellaert
+ * @author Paul Furgale
+ * @brief Evaluate derivatives of a nonlinear factor numerically
+ */
+
+#pragma once
+
+#include <gtsam/nonlinear/NonlinearFactor.h>
+#include <gtsam/base/numericalDerivative.h>
+
+namespace gtsam {
+
+/**
+ * Linearize a nonlinear factor using numerical differentiation
+ * The benefit of this method is that it does not need to know what types are
+ * involved to evaluate the factor. If all the machinery of gtsam is working
+ * correctly, we should get the correct numerical derivatives out the other side.
+ */
+JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
+const Values& values, double delta = 1e-5) {
+
+// We will fill a vector of key/Jacobians pairs (a map would sort)
+std::vector<std::pair<Key, Matrix> > jacobians;
+
+// Get size
+Eigen::VectorXd e = factor.whitenedError(values);
+const size_t rows = e.size();
+
+// Loop over all variables
+VectorValues dX = values.zeroVectors();
+BOOST_FOREACH(Key key, factor) {
+// Compute central differences using the values struct.
+const size_t cols = dX.dim(key);
+Matrix J = Matrix::Zero(rows, cols);
+for (size_t col = 0; col < cols; ++col) {
+Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols);
+dx[col] = delta;
+dX[key] = dx;
+Values eval_values = values.retract(dX);
+Eigen::VectorXd left = factor.whitenedError(eval_values);
+dx[col] = -delta;
+dX[key] = dx;
+eval_values = values.retract(dX);
+Eigen::VectorXd right = factor.whitenedError(eval_values);
+J.col(col) = (left - right) * (1.0 / (2.0 * delta));
+}
+jacobians.push_back(std::make_pair(key,J));
+}
+
+// Next step...return JacobianFactor
+return JacobianFactor(jacobians, -e);
+}
+
+} // namespace gtsam
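Usage note (not part of the commit): the intended pattern, which the updated ImuFactor test above already follows, is to compare a factor's own linearization against the numerical one. Sketch only; factor and values stand for any NoiseModelFactor and a Values containing its keys:

    JacobianFactor expected = linearizeNumerically(factor, values, 1e-8);      // central differences
    GaussianFactor::shared_ptr linearized = factor.linearize(values);          // analytic Jacobians
    JacobianFactor* actual = dynamic_cast<JacobianFactor*>(linearized.get());
    EXPECT(assert_equal(expected, *actual, 1e-4));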
@@ -22,6 +22,7 @@
 #include <gtsam/slam/ProjectionFactor.h>
 #include <gtsam/slam/PriorFactor.h>
 #include <gtsam/nonlinear/ExpressionFactor.h>
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
 #include <gtsam/nonlinear/expressionTesting.h>
 #include <gtsam/base/Testable.h>

@@ -496,6 +497,11 @@ TEST(ExpressionFactor, tree_finite_differences) {
 EXPECT_CORRECT_EXPRESSION_JACOBIANS(uv_hat, values, fd_step, tolerance);
 }

+TEST(ExpressionFactor, push_back) {
+NonlinearFactorGraph graph;
+graph.addExpressionFactor(model, Point2(0, 0), leaf::p);
+}
+
 /* ************************************************************************* */
 int main() {
 TestResult tr;
@@ -57,6 +57,11 @@ namespace gtsam {
 Base(model, key), prior_(prior) {
 }

+/** Convenience constructor that takes a full covariance argument */
+PriorFactor(Key key, const VALUE& prior, const Matrix& covariance) :
+Base(noiseModel::Gaussian::Covariance(covariance), key), prior_(prior) {
+}
+
 /// @return a deep copy of this factor
 virtual gtsam::NonlinearFactor::shared_ptr clone() const {
 return boost::static_pointer_cast<gtsam::NonlinearFactor>(
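Usage note (not part of the commit): the convenience constructor builds the Gaussian noise model from the covariance internally. A minimal sketch with placeholder key, prior and covariance values:

    Matrix cov = Matrix::Identity(6, 6) * 0.01;               // 6x6 covariance for a Pose3 prior
    PriorFactor<Pose3> prior(Symbol('x', 0), Pose3(), cov);   // same as passing noiseModel::Gaussian::Covariance(cov)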
@@ -64,8 +64,8 @@ string findExampleDataFile(const string& name) {
 throw
 invalid_argument(
 "gtsam::findExampleDataFile could not find a matching file in\n"
-SOURCE_TREE_DATASET_DIR " or\n"
-INSTALLED_DATASET_DIR " named\n" +
+GTSAM_SOURCE_TREE_DATASET_DIR " or\n"
+GTSAM_INSTALLED_DATASET_DIR " named\n" +
 name + ", " + name + ".graph, or " + name + ".txt");
 }
@ -28,25 +28,34 @@
|
||||||
namespace gtsam {
|
namespace gtsam {
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
void BatchFixedLagSmoother::print(const std::string& s, const KeyFormatter& keyFormatter) const {
|
void BatchFixedLagSmoother::print(const std::string& s,
|
||||||
|
const KeyFormatter& keyFormatter) const {
|
||||||
FixedLagSmoother::print(s, keyFormatter);
|
FixedLagSmoother::print(s, keyFormatter);
|
||||||
// TODO: What else to print?
|
// TODO: What else to print?
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
bool BatchFixedLagSmoother::equals(const FixedLagSmoother& rhs, double tol) const {
|
bool BatchFixedLagSmoother::equals(const FixedLagSmoother& rhs,
|
||||||
const BatchFixedLagSmoother* e = dynamic_cast<const BatchFixedLagSmoother*> (&rhs);
|
double tol) const {
|
||||||
return e != NULL
|
const BatchFixedLagSmoother* e =
|
||||||
&& FixedLagSmoother::equals(*e, tol)
|
dynamic_cast<const BatchFixedLagSmoother*>(&rhs);
|
||||||
&& factors_.equals(e->factors_, tol)
|
return e != NULL && FixedLagSmoother::equals(*e, tol)
|
||||||
&& theta_.equals(e->theta_, tol);
|
&& factors_.equals(e->factors_, tol) && theta_.equals(e->theta_, tol);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
FixedLagSmoother::Result BatchFixedLagSmoother::update(const NonlinearFactorGraph& newFactors, const Values& newTheta, const KeyTimestampMap& timestamps) {
|
Matrix BatchFixedLagSmoother::marginalCovariance(Key key) const {
|
||||||
|
throw std::runtime_error(
|
||||||
|
"BatchFixedLagSmoother::marginalCovariance not implemented");
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
FixedLagSmoother::Result BatchFixedLagSmoother::update(
|
||||||
|
const NonlinearFactorGraph& newFactors, const Values& newTheta,
|
||||||
|
const KeyTimestampMap& timestamps) {
|
||||||
|
|
||||||
const bool debug = ISDEBUG("BatchFixedLagSmoother update");
|
const bool debug = ISDEBUG("BatchFixedLagSmoother update");
|
||||||
if(debug) {
|
if (debug) {
|
||||||
std::cout << "BatchFixedLagSmoother::update() START" << std::endl;
|
std::cout << "BatchFixedLagSmoother::update() START" << std::endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -70,11 +79,13 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(const NonlinearFactorGrap
|
||||||
|
|
||||||
// Get current timestamp
|
// Get current timestamp
|
||||||
double current_timestamp = getCurrentTimestamp();
|
double current_timestamp = getCurrentTimestamp();
|
||||||
if(debug) std::cout << "Current Timestamp: " << current_timestamp << std::endl;
|
if (debug)
|
||||||
|
std::cout << "Current Timestamp: " << current_timestamp << std::endl;
|
||||||
|
|
||||||
// Find the set of variables to be marginalized out
|
// Find the set of variables to be marginalized out
|
||||||
std::set<Key> marginalizableKeys = findKeysBefore(current_timestamp - smootherLag_);
|
std::set<Key> marginalizableKeys = findKeysBefore(
|
||||||
if(debug) {
|
current_timestamp - smootherLag_);
|
||||||
|
if (debug) {
|
||||||
std::cout << "Marginalizable Keys: ";
|
std::cout << "Marginalizable Keys: ";
|
||||||
BOOST_FOREACH(Key key, marginalizableKeys) {
|
BOOST_FOREACH(Key key, marginalizableKeys) {
|
||||||
std::cout << DefaultKeyFormatter(key) << " ";
|
std::cout << DefaultKeyFormatter(key) << " ";
|
||||||
|
@ -90,19 +101,19 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(const NonlinearFactorGrap
|
||||||
// Optimize
|
// Optimize
|
||||||
gttic(optimize);
|
gttic(optimize);
|
||||||
Result result;
|
Result result;
|
||||||
if(factors_.size() > 0) {
|
if (factors_.size() > 0) {
|
||||||
result = optimize();
|
result = optimize();
|
||||||
}
|
}
|
||||||
gttoc(optimize);
|
gttoc(optimize);
|
||||||
|
|
||||||
// Marginalize out old variables.
|
// Marginalize out old variables.
|
||||||
gttic(marginalize);
|
gttic(marginalize);
|
||||||
if(marginalizableKeys.size() > 0) {
|
if (marginalizableKeys.size() > 0) {
|
||||||
marginalize(marginalizableKeys);
|
marginalize(marginalizableKeys);
|
||||||
}
|
}
|
||||||
gttoc(marginalize);
|
gttoc(marginalize);
|
||||||
|
|
||||||
if(debug) {
|
if (debug) {
|
||||||
std::cout << "BatchFixedLagSmoother::update() FINISH" << std::endl;
|
std::cout << "BatchFixedLagSmoother::update() FINISH" << std::endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -110,11 +121,12 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(const NonlinearFactorGrap
}

/* ************************************************************************* */
void BatchFixedLagSmoother::insertFactors(
    const NonlinearFactorGraph& newFactors) {
  BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, newFactors) {
    Key index;
    // Insert the factor into an existing hole in the factor graph, if possible
    if (availableSlots_.size() > 0) {
      index = availableSlots_.front();
      availableSlots_.pop();
      factors_.replace(index, factor);

@ -130,9 +142,10 @@ void BatchFixedLagSmoother::insertFactors(const NonlinearFactorGraph& newFactors
}

/* ************************************************************************* */
void BatchFixedLagSmoother::removeFactors(
    const std::set<size_t>& deleteFactors) {
  BOOST_FOREACH(size_t slot, deleteFactors) {
    if (factors_.at(slot)) {
      // Remove references to this factor from the FactorIndex
      BOOST_FOREACH(Key key, *(factors_.at(slot))) {
        factorIndex_[key].erase(slot);

@ -143,7 +156,8 @@ void BatchFixedLagSmoother::removeFactors(const std::set<size_t>& deleteFactors)
      availableSlots_.push(slot);
    } else {
      // TODO: Throw an error??
      std::cout << "Attempting to remove a factor from slot " << slot
          << ", but it is already NULL." << std::endl;
    }
  }
}

@ -159,7 +173,7 @@ void BatchFixedLagSmoother::eraseKeys(const std::set<Key>& keys) {
    factorIndex_.erase(key);

    // Erase the key from the set of linearized keys
    if (linearKeys_.exists(key)) {
      linearKeys_.erase(key);
    }
  }

@ -178,11 +192,11 @@ void BatchFixedLagSmoother::reorder(const std::set<Key>& marginalizeKeys) {

  const bool debug = ISDEBUG("BatchFixedLagSmoother reorder");

  if (debug) {
    std::cout << "BatchFixedLagSmoother::reorder() START" << std::endl;
  }

  if (debug) {
    std::cout << "Marginalizable Keys: ";
    BOOST_FOREACH(Key key, marginalizeKeys) {
      std::cout << DefaultKeyFormatter(key) << " ";

@ -191,13 +205,14 @@ void BatchFixedLagSmoother::reorder(const std::set<Key>& marginalizeKeys) {
  }

  // COLAMD groups will be used to place marginalize keys in Group 0, and everything else in Group 1
  ordering_ = Ordering::colamdConstrainedFirst(factors_,
      std::vector<Key>(marginalizeKeys.begin(), marginalizeKeys.end()));

  if (debug) {
    ordering_.print("New Ordering: ");
  }

  if (debug) {
    std::cout << "BatchFixedLagSmoother::reorder() FINISH" << std::endl;
  }
}
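A side note on the constrained ordering used above: colamdConstrainedFirst forces the listed keys into COLAMD group 0, so they come first in the elimination order and eliminating them yields the marginal on everything that remains. A minimal, self-contained sketch (the helper name is hypothetical, not from the patch):

// Hypothetical sketch (illustration only, not from the patch): place one key in
// COLAMD group 0 so it is eliminated before every other variable.
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Ordering.h>

gtsam::Ordering orderingWithKeyFirst(const gtsam::NonlinearFactorGraph& factorGraph,
                                     gtsam::Key keyToEliminateFirst) {
  std::vector<gtsam::Key> eliminateFirst;
  eliminateFirst.push_back(keyToEliminateFirst);
  // Constrained COLAMD: group-0 keys come first in the returned elimination order.
  return gtsam::Ordering::colamdConstrainedFirst(factorGraph, eliminateFirst);
}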
@ -207,7 +222,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {

  const bool debug = ISDEBUG("BatchFixedLagSmoother optimize");

  if (debug) {
    std::cout << "BatchFixedLagSmoother::optimize() START" << std::endl;
  }

@ -231,14 +246,19 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
  result.error = factors_.error(evalpoint);

  // check if we're already close enough
  if (result.error <= errorTol) {
    if (debug) {
      std::cout << "BatchFixedLagSmoother::optimize Exiting, as error = "
          << result.error << " < " << errorTol << std::endl;
    }
    return result;
  }

  if (debug) {
    std::cout << "BatchFixedLagSmoother::optimize linearValues: "
        << linearKeys_.size() << std::endl;
    std::cout << "BatchFixedLagSmoother::optimize Initial error: "
        << result.error << std::endl;
  }

  // Use a custom optimization loop so the linearization points can be controlled
@ -254,9 +274,12 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
    GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_);

    // Keep increasing lambda until we make progress
    while (true) {

      if (debug) {
        std::cout << "BatchFixedLagSmoother::optimize trying lambda = "
            << lambda << std::endl;
      }

      // Add prior factors at the current solution
      gttic(damp);
@ -267,10 +290,11 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
        double sigma = 1.0 / std::sqrt(lambda);
        BOOST_FOREACH(const VectorValues::KeyValuePair& key_value, delta_) {
          size_t dim = key_value.second.size();
          Matrix A = Matrix::Identity(dim, dim);
          Vector b = key_value.second;
          SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
          GaussianFactor::shared_ptr prior(
              new JacobianFactor(key_value.first, A, b, model));
          dampedFactorGraph.push_back(prior);
        }
      }
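A reading of the damping block above (an editorial note, not text from the patch): each added JacobianFactor has unit Jacobian A = I, right-hand side b equal to the variable's current delta, and isotropic sigma = 1/sqrt(lambda), so it contributes (lambda/2) * ||delta - delta_current||^2 to the linearized objective. The damped subproblem solved in the next hunk is therefore the usual Levenberg-Marquardt step, minimizing ||J delta - r||^2 + lambda * ||delta - delta_current||^2 over delta, with lambda decreased when the error improves and increased otherwise.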
@ -279,7 +303,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {

      gttic(solve);
      // Solve Damped Gaussian Factor Graph
      newDelta = dampedFactorGraph.optimize(ordering_,
          parameters_.getEliminationFunction());
      // update the evalpoint with the new delta
      evalpoint = theta_.retract(newDelta);
      gttoc(solve);

@ -289,12 +314,14 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
      double error = factors_.error(evalpoint);
      gttoc(compute_error);

      if (debug) {
        std::cout << "BatchFixedLagSmoother::optimize linear delta norm = "
            << newDelta.norm() << std::endl;
        std::cout << "BatchFixedLagSmoother::optimize next error = " << error
            << std::endl;
      }

      if (error < result.error) {
        // Keep this change
        // Update the error value
        result.error = error;

@ -303,7 +330,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
        // Reset the deltas to zeros
        delta_.setZero();
        // Put the linearization points and deltas back for specific variables
        if (enforceConsistency_ && (linearKeys_.size() > 0)) {
          theta_.update(linearKeys_);
          BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, linearKeys_) {
            delta_.at(key_value.key) = newDelta.at(key_value.key);

@ -311,16 +338,18 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
        }
        // Decrease lambda for next time
        lambda /= lambdaFactor;
        if (lambda < lambdaLowerBound) {
          lambda = lambdaLowerBound;
        }
        // End this lambda search iteration
        break;
      } else {
        // Reject this change
        if (lambda >= lambdaUpperBound) {
          // The maximum lambda has been used. Print a warning and end the search.
          std::cout
              << "Warning: Levenberg-Marquardt giving up because cannot decrease error with maximum lambda"
              << std::endl;
          break;
        } else {
          // Increase lambda and continue searching

@ -331,15 +360,22 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
    }
    gttoc(optimizer_iteration);

    if (debug) {
      std::cout << "BatchFixedLagSmoother::optimize using lambda = " << lambda
          << std::endl;
    }

    result.iterations++;
  } while (result.iterations < maxIterations
      && !checkConvergence(relativeErrorTol, absoluteErrorTol, errorTol,
          previousError, result.error, NonlinearOptimizerParams::SILENT));

  if (debug) {
    std::cout << "BatchFixedLagSmoother::optimize newError: " << result.error
        << std::endl;
  }

  if (debug) {
    std::cout << "BatchFixedLagSmoother::optimize() FINISH" << std::endl;
  }
@ -356,9 +392,10 @@ void BatchFixedLagSmoother::marginalize(const std::set<Key>& marginalizeKeys) {

  const bool debug = ISDEBUG("BatchFixedLagSmoother marginalize");

  if (debug)
    std::cout << "BatchFixedLagSmoother::marginalize Begin" << std::endl;

  if (debug) {
    std::cout << "BatchFixedLagSmoother::marginalize Marginalize Keys: ";
    BOOST_FOREACH(Key key, marginalizeKeys) {
      std::cout << DefaultKeyFormatter(key) << " ";

@ -374,7 +411,7 @@ void BatchFixedLagSmoother::marginalize(const std::set<Key>& marginalizeKeys) {
    removedFactorSlots.insert(slots.begin(), slots.end());
  }

  if (debug) {
    std::cout << "BatchFixedLagSmoother::marginalize Removed Factor Slots: ";
    BOOST_FOREACH(size_t slot, removedFactorSlots) {
      std::cout << slot << " ";
@ -385,20 +422,24 @@ void BatchFixedLagSmoother::marginalize(const std::set<Key>& marginalizeKeys) {
  // Add the removed factors to a factor graph
  NonlinearFactorGraph removedFactors;
  BOOST_FOREACH(size_t slot, removedFactorSlots) {
    if (factors_.at(slot)) {
      removedFactors.push_back(factors_.at(slot));
    }
  }

  if (debug) {
    PrintSymbolicGraph(removedFactors,
        "BatchFixedLagSmoother::marginalize Removed Factors: ");
  }

  // Calculate marginal factors on the remaining keys
  NonlinearFactorGraph marginalFactors = calculateMarginalFactors(
      removedFactors, theta_, marginalizeKeys,
      parameters_.getEliminationFunction());

  if (debug) {
    PrintSymbolicGraph(marginalFactors,
        "BatchFixedLagSmoother::marginalize Marginal Factors: ");
  }

  // Remove marginalized factors from the factor graph
@ -412,7 +453,8 @@ void BatchFixedLagSmoother::marginalize(const std::set<Key>& marginalizeKeys) {
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintKeySet(const std::set<Key>& keys,
    const std::string& label) {
  std::cout << label;
  BOOST_FOREACH(gtsam::Key key, keys) {
    std::cout << " " << gtsam::DefaultKeyFormatter(key);

@ -421,7 +463,8 @@ void BatchFixedLagSmoother::PrintKeySet(const std::set<Key>& keys, const std::st
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintKeySet(const gtsam::FastSet<Key>& keys,
    const std::string& label) {
  std::cout << label;
  BOOST_FOREACH(gtsam::Key key, keys) {
    std::cout << " " << gtsam::DefaultKeyFormatter(key);

@ -430,9 +473,10 @@ void BatchFixedLagSmoother::PrintKeySet(const gtsam::FastSet<Key>& keys, const s
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintSymbolicFactor(
    const NonlinearFactor::shared_ptr& factor) {
  std::cout << "f(";
  if (factor) {
    BOOST_FOREACH(Key key, factor->keys()) {
      std::cout << " " << gtsam::DefaultKeyFormatter(key);
    }

@ -443,7 +487,8 @@ void BatchFixedLagSmoother::PrintSymbolicFactor(const NonlinearFactor::shared_pt
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintSymbolicFactor(
    const GaussianFactor::shared_ptr& factor) {
  std::cout << "f(";
  BOOST_FOREACH(Key key, factor->keys()) {
    std::cout << " " << gtsam::DefaultKeyFormatter(key);

@ -452,7 +497,8 @@ void BatchFixedLagSmoother::PrintSymbolicFactor(const GaussianFactor::shared_ptr
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintSymbolicGraph(
    const NonlinearFactorGraph& graph, const std::string& label) {
  std::cout << label << std::endl;
  BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, graph) {
    PrintSymbolicFactor(factor);
@ -460,59 +506,79 @@ void BatchFixedLagSmoother::PrintSymbolicGraph(const NonlinearFactorGraph& graph
}

/* ************************************************************************* */
void BatchFixedLagSmoother::PrintSymbolicGraph(const GaussianFactorGraph& graph,
    const std::string& label) {
  std::cout << label << std::endl;
  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, graph) {
    PrintSymbolicFactor(factor);
  }
}

/* ************************************************************************* */
NonlinearFactorGraph BatchFixedLagSmoother::calculateMarginalFactors(
    const NonlinearFactorGraph& graph, const Values& theta,
    const std::set<Key>& marginalizeKeys,
    const GaussianFactorGraph::Eliminate& eliminateFunction) {

  const bool debug = ISDEBUG("BatchFixedLagSmoother calculateMarginalFactors");

  if (debug)
    std::cout << "BatchFixedLagSmoother::calculateMarginalFactors START"
        << std::endl;

  if (debug)
    PrintKeySet(marginalizeKeys,
        "BatchFixedLagSmoother::calculateMarginalFactors Marginalize Keys: ");

  // Get the set of all keys involved in the factor graph
  FastSet<Key> allKeys(graph.keys());
  if (debug)
    PrintKeySet(allKeys,
        "BatchFixedLagSmoother::calculateMarginalFactors All Keys: ");

  // Calculate the set difference RemainingKeys = AllKeys \ marginalizeKeys
  FastSet<Key> remainingKeys;
  std::set_difference(allKeys.begin(), allKeys.end(), marginalizeKeys.begin(),
      marginalizeKeys.end(), std::inserter(remainingKeys, remainingKeys.end()));
  if (debug)
    PrintKeySet(remainingKeys,
        "BatchFixedLagSmoother::calculateMarginalFactors Remaining Keys: ");

  if (marginalizeKeys.size() == 0) {
    // There are no keys to marginalize. Simply return the input factors
    if (debug)
      std::cout << "BatchFixedLagSmoother::calculateMarginalFactors FINISH"
          << std::endl;
    return graph;
  } else {

    // Create the linear factor graph
    GaussianFactorGraph linearFactorGraph = *graph.linearize(theta);
    // .first is the eliminated Bayes tree, while .second is the remaining factor graph
    GaussianFactorGraph marginalLinearFactors =
        *linearFactorGraph.eliminatePartialMultifrontal(
            std::vector<Key>(marginalizeKeys.begin(), marginalizeKeys.end())).second;

    // Wrap in nonlinear container factors
    NonlinearFactorGraph marginalFactors;
    marginalFactors.reserve(marginalLinearFactors.size());
    BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, marginalLinearFactors) {
      marginalFactors += boost::make_shared<LinearContainerFactor>(
          gaussianFactor, theta);
      if (debug) {
        std::cout
            << "BatchFixedLagSmoother::calculateMarginalFactors Marginal Factor: ";
        PrintSymbolicFactor(marginalFactors.back());
      }
    }

    if (debug)
      PrintSymbolicGraph(marginalFactors,
          "BatchFixedLagSmoother::calculateMarginalFactors All Marginal Factors: ");

    if (debug)
      std::cout << "BatchFixedLagSmoother::calculateMarginalFactors FINISH"
          << std::endl;

    return marginalFactors;
  }
}
@ -100,6 +100,12 @@ public:
    return delta_;
  }

  /// Calculate marginal covariance on given variable
  Matrix marginalCovariance(Key key) const;

  static NonlinearFactorGraph calculateMarginalFactors(
      const NonlinearFactorGraph& graph, const Values& theta,
      const std::set<Key>& marginalizeKeys,
      const GaussianFactorGraph::Eliminate& eliminateFunction);

protected:

  /** A typedef defining a Key-Factor mapping **/
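With calculateMarginalFactors declared in the public section above, the same linearize/eliminate/wrap summarization can be reused outside the smoother. A hedged sketch follows; the helper name is hypothetical, and EliminatePreferCholesky is assumed to be one of the elimination functions available from gtsam/linear:

// Hypothetical sketch (illustration only, not part of the patch).
#include <gtsam_unstable/nonlinear/BatchFixedLagSmoother.h>

gtsam::NonlinearFactorGraph summarizeOut(
    const gtsam::NonlinearFactorGraph& graph,
    const gtsam::Values& linearizationPoint,
    const std::set<gtsam::Key>& keysToDrop) {
  // Linearize at the given point, eliminate keysToDrop, and return the induced
  // factors on the remaining keys (wrapped as LinearContainerFactors).
  return gtsam::BatchFixedLagSmoother::calculateMarginalFactors(
      graph, linearizationPoint, keysToDrop, gtsam::EliminatePreferCholesky);
}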
@ -134,8 +140,6 @@ protected:
  /** A cross-reference structure to allow efficient factor lookups by key **/
  FactorIndex factorIndex_;

  /** Augment the list of factors with a set of new factors */
  void insertFactors(const NonlinearFactorGraph& newFactors);

@ -154,9 +158,6 @@ protected:
  /** Marginalize out selected variables */
  void marginalize(const std::set<Key>& marginalizableKeys);

private:
  /** Private methods for printing debug information */
  static void PrintKeySet(const std::set<Key>& keys, const std::string& label);
@ -37,7 +37,7 @@ bool FixedLagSmoother::equals(const FixedLagSmoother& rhs, double tol) const {
void FixedLagSmoother::updateKeyTimestampMap(const KeyTimestampMap& timestamps) {
  // Loop through each key and add/update it in the map
  BOOST_FOREACH(const KeyTimestampMap::value_type& key_timestamp, timestamps) {
    // Check to see if this key already exists in the database
    KeyTimestampMap::iterator keyIter = keyTimestampMap_.find(key_timestamp.first);

    // If the key already exists
@ -25,10 +25,13 @@
namespace gtsam {

/* ************************************************************************* */
void recursiveMarkAffectedKeys(const Key& key,
    const ISAM2Clique::shared_ptr& clique, std::set<Key>& additionalKeys) {

  // Check if the separator keys of the current clique contain the specified key
  if (std::find(clique->conditional()->beginParents(),
      clique->conditional()->endParents(), key)
      != clique->conditional()->endParents()) {

    // Mark the frontal keys of the current clique
    BOOST_FOREACH(Key i, clique->conditional()->frontals()) {

@ -44,32 +47,36 @@ void recursiveMarkAffectedKeys(const Key& key, const ISAM2Clique::shared_ptr& cl
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::print(const std::string& s,
    const KeyFormatter& keyFormatter) const {
  FixedLagSmoother::print(s, keyFormatter);
  // TODO: What else to print?
}

/* ************************************************************************* */
bool IncrementalFixedLagSmoother::equals(const FixedLagSmoother& rhs,
    double tol) const {
  const IncrementalFixedLagSmoother* e =
      dynamic_cast<const IncrementalFixedLagSmoother*>(&rhs);
  return e != NULL && FixedLagSmoother::equals(*e, tol)
      && isam_.equals(e->isam_, tol);
}

/* ************************************************************************* */
FixedLagSmoother::Result IncrementalFixedLagSmoother::update(
    const NonlinearFactorGraph& newFactors, const Values& newTheta,
    const KeyTimestampMap& timestamps) {

  const bool debug = ISDEBUG("IncrementalFixedLagSmoother update");

  if (debug) {
    std::cout << "IncrementalFixedLagSmoother::update() Start" << std::endl;
    PrintSymbolicTree(isam_, "Bayes Tree Before Update:");
    std::cout << "END" << std::endl;
  }

  FastVector<size_t> removedFactors;
  boost::optional<FastMap<Key, int> > constrainedKeys = boost::none;

  // Update the Timestamps associated with the factor keys
  updateKeyTimestampMap(timestamps);
@ -77,12 +84,14 @@ FixedLagSmoother::Result IncrementalFixedLagSmoother::update(const NonlinearFact
  // Get current timestamp
  double current_timestamp = getCurrentTimestamp();

  if (debug)
    std::cout << "Current Timestamp: " << current_timestamp << std::endl;

  // Find the set of variables to be marginalized out
  std::set<Key> marginalizableKeys = findKeysBefore(
      current_timestamp - smootherLag_);

  if (debug) {
    std::cout << "Marginalizable Keys: ";
    BOOST_FOREACH(Key key, marginalizableKeys) {
      std::cout << DefaultKeyFormatter(key) << " ";

@ -93,11 +102,13 @@ FixedLagSmoother::Result IncrementalFixedLagSmoother::update(const NonlinearFact
  // Force iSAM2 to put the marginalizable variables at the beginning
  createOrderingConstraints(marginalizableKeys, constrainedKeys);

  if (debug) {
    std::cout << "Constrained Keys: ";
    if (constrainedKeys) {
      for (FastMap<Key, int>::const_iterator iter = constrainedKeys->begin();
          iter != constrainedKeys->end(); ++iter) {
        std::cout << DefaultKeyFormatter(iter->first) << "(" << iter->second
            << ") ";
      }
    }
    std::cout << std::endl;
@ -114,23 +125,26 @@ FixedLagSmoother::Result IncrementalFixedLagSmoother::update(const NonlinearFact
  KeyList additionalMarkedKeys(additionalKeys.begin(), additionalKeys.end());

  // Update iSAM2
  ISAM2Result isamResult = isam_.update(newFactors, newTheta,
      FastVector<size_t>(), constrainedKeys, boost::none, additionalMarkedKeys);

  if (debug) {
    PrintSymbolicTree(isam_,
        "Bayes Tree After Update, Before Marginalization:");
    std::cout << "END" << std::endl;
  }

  // Marginalize out any needed variables
  if (marginalizableKeys.size() > 0) {
    FastList<Key> leafKeys(marginalizableKeys.begin(),
        marginalizableKeys.end());
    isam_.marginalizeLeaves(leafKeys);
  }

  // Remove marginalized keys from the KeyTimestampMap
  eraseKeyTimestampMap(marginalizableKeys);

  if (debug) {
    PrintSymbolicTree(isam_, "Final Bayes Tree:");
    std::cout << "END" << std::endl;
  }

@ -142,7 +156,8 @@ FixedLagSmoother::Result IncrementalFixedLagSmoother::update(const NonlinearFact
  result.nonlinearVariables = 0;
  result.error = 0;

  if (debug)
    std::cout << "IncrementalFixedLagSmoother::update() Finish" << std::endl;

  return result;
}
@ -151,30 +166,33 @@ FixedLagSmoother::Result IncrementalFixedLagSmoother::update(const NonlinearFact
void IncrementalFixedLagSmoother::eraseKeysBefore(double timestamp) {
  TimestampKeyMap::iterator end = timestampKeyMap_.lower_bound(timestamp);
  TimestampKeyMap::iterator iter = timestampKeyMap_.begin();
  while (iter != end) {
    keyTimestampMap_.erase(iter->second);
    timestampKeyMap_.erase(iter++);
  }
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::createOrderingConstraints(
    const std::set<Key>& marginalizableKeys,
    boost::optional<FastMap<Key, int> >& constrainedKeys) const {
  if (marginalizableKeys.size() > 0) {
    constrainedKeys = FastMap<Key, int>();
    // Generate ordering constraints so that the marginalizable variables will be eliminated first
    // Set all variables to Group1
    BOOST_FOREACH(const TimestampKeyMap::value_type& timestamp_key, timestampKeyMap_) {
      constrainedKeys->operator[](timestamp_key.second) = 1;
    }
    // Set marginalizable variables to Group0
    BOOST_FOREACH(Key key, marginalizableKeys) {
      constrainedKeys->operator[](key) = 0;
    }
  }
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::PrintKeySet(const std::set<Key>& keys,
    const std::string& label) {
  std::cout << label;
  BOOST_FOREACH(gtsam::Key key, keys) {
    std::cout << " " << gtsam::DefaultKeyFormatter(key);
@ -183,7 +201,8 @@ void IncrementalFixedLagSmoother::PrintKeySet(const std::set<Key>& keys, const s
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::PrintSymbolicFactor(
    const GaussianFactor::shared_ptr& factor) {
  std::cout << "f(";
  BOOST_FOREACH(gtsam::Key key, factor->keys()) {
    std::cout << " " << gtsam::DefaultKeyFormatter(key);

@ -192,7 +211,8 @@ void IncrementalFixedLagSmoother::PrintSymbolicFactor(const GaussianFactor::shar
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::PrintSymbolicGraph(
    const GaussianFactorGraph& graph, const std::string& label) {
  std::cout << label << std::endl;
  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, graph) {
    PrintSymbolicFactor(factor);

@ -200,27 +220,28 @@ void IncrementalFixedLagSmoother::PrintSymbolicGraph(const GaussianFactorGraph&
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::PrintSymbolicTree(const gtsam::ISAM2& isam,
    const std::string& label) {
  std::cout << label << std::endl;
  if (!isam.roots().empty()) {
    BOOST_FOREACH(const ISAM2::sharedClique& root, isam.roots()) {
      PrintSymbolicTreeHelper(root);
    }
  } else
    std::cout << "{Empty Tree}" << std::endl;
}

/* ************************************************************************* */
void IncrementalFixedLagSmoother::PrintSymbolicTreeHelper(
    const gtsam::ISAM2Clique::shared_ptr& clique, const std::string indent) {

  // Print the current clique
  std::cout << indent << "P( ";
  BOOST_FOREACH(gtsam::Key key, clique->conditional()->frontals()) {
    std::cout << gtsam::DefaultKeyFormatter(key) << " ";
  }
  if (clique->conditional()->nrParents() > 0)
    std::cout << "| ";
  BOOST_FOREACH(gtsam::Key key, clique->conditional()->parents()) {
    std::cout << gtsam::DefaultKeyFormatter(key) << " ";
  }

@ -228,7 +249,7 @@ void IncrementalFixedLagSmoother::PrintSymbolicTreeHelper(const gtsam::ISAM2Cliq

  // Recursively print all of the children
  BOOST_FOREACH(const gtsam::ISAM2Clique::shared_ptr& child, clique->children) {
    PrintSymbolicTreeHelper(child, indent + " ");
  }
}
@ -23,7 +23,6 @@
#include <gtsam_unstable/nonlinear/FixedLagSmoother.h>
#include <gtsam/nonlinear/ISAM2.h>

namespace gtsam {

/**
@ -31,7 +30,7 @@ namespace gtsam {
 * such that the active states are placed in/near the root. This base class implements a function
 * to calculate the ordering, and an update function to incorporate new factors into the HMF.
 */
class GTSAM_UNSTABLE_EXPORT IncrementalFixedLagSmoother: public FixedLagSmoother {

public:

@ -39,20 +38,30 @@ public:
  typedef boost::shared_ptr<IncrementalFixedLagSmoother> shared_ptr;

  /** default constructor */
  IncrementalFixedLagSmoother(double smootherLag = 0.0,
      const ISAM2Params& parameters = ISAM2Params()) :
      FixedLagSmoother(smootherLag), isam_(parameters) {
  }

  /** destructor */
  virtual ~IncrementalFixedLagSmoother() {
  }

  /** Print the factor for debugging and testing (implementing Testable) */
  virtual void print(const std::string& s = "IncrementalFixedLagSmoother:\n",
      const KeyFormatter& keyFormatter = DefaultKeyFormatter) const;

  /** Check if two IncrementalFixedLagSmoother Objects are equal */
  virtual bool equals(const FixedLagSmoother& rhs, double tol = 1e-9) const;

  /**
   * Add new factors, updating the solution and re-linearizing as needed.
   * @param newFactors new factors on old and/or new variables
   * @param newTheta new values for new variables only
   * @param timestamps an (optional) map from keys to real time stamps
   */
  Result update(const NonlinearFactorGraph& newFactors = NonlinearFactorGraph(),
      const Values& newTheta = Values(),
      const KeyTimestampMap& timestamps = KeyTimestampMap());

  /** Compute an estimate from the incomplete linear delta computed during the last update.
@ -94,6 +103,11 @@ public:
    return isam_.getDelta();
  }

  /// Calculate marginal covariance on given variable
  Matrix marginalCovariance(Key key) const {
    return isam_.marginalCovariance(key);
  }

protected:
  /** An iSAM2 object used to perform inference. The smoother lag is controlled
   * by what factors are removed each iteration */
@ -103,16 +117,23 @@ protected:
  void eraseKeysBefore(double timestamp);

  /** Fill in an iSAM2 ConstrainedKeys structure such that the provided keys are eliminated before all others */
  void createOrderingConstraints(const std::set<Key>& marginalizableKeys,
      boost::optional<FastMap<Key, int> >& constrainedKeys) const;

private:
  /** Private methods for printing debug information */
  static void PrintKeySet(const std::set<Key>& keys,
      const std::string& label = "Keys:");
  static void PrintSymbolicFactor(const GaussianFactor::shared_ptr& factor);
  static void PrintSymbolicGraph(const GaussianFactorGraph& graph,
      const std::string& label = "Factor Graph:");
  static void PrintSymbolicTree(const gtsam::ISAM2& isam,
      const std::string& label = "Bayes Tree:");
  static void PrintSymbolicTreeHelper(
      const gtsam::ISAM2Clique::shared_ptr& clique,
      const std::string indent = "");

}; // IncrementalFixedLagSmoother

} /// namespace gtsam
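A minimal usage sketch for the header above (not from this change set; the Pose2 prior, the 'x' key, the 2-second lag, and the timestamps are illustrative assumptions, while calculateEstimate() and marginalCovariance() are the accessors visible in this header):

// Hypothetical usage sketch (illustration only; identifiers below are assumptions).
#include <gtsam_unstable/nonlinear/IncrementalFixedLagSmoother.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/inference/Symbol.h>
#include <iostream>

void exampleIncrementalSmoother() {
  using namespace gtsam;
  IncrementalFixedLagSmoother smoother(2.0);  // 2 second smoothing lag

  NonlinearFactorGraph newFactors;
  Values newValues;
  FixedLagSmoother::KeyTimestampMap newTimestamps;

  Key x0 = Symbol('x', 0);
  newFactors.push_back(PriorFactor<Pose2>(x0, Pose2(0, 0, 0),
      noiseModel::Isotropic::Sigma(3, 0.1)));
  newValues.insert(x0, Pose2(0, 0, 0));
  newTimestamps[x0] = 0.0;  // seconds

  smoother.update(newFactors, newValues, newTimestamps);

  // Query the current estimate and the per-variable covariance from iSAM2
  Values estimate = smoother.calculateEstimate();
  Matrix cov = smoother.marginalCovariance(x0);
  estimate.print("estimate ");
  std::cout << "covariance of x0:\n" << cov << std::endl;
}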
@ -17,19 +17,20 @@
 */

#include <gtsam_unstable/nonlinear/IncrementalFixedLagSmoother.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/inference/Key.h>
#include <gtsam/inference/Ordering.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/base/debug.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;
@ -73,6 +73,7 @@ TEST( InvDepthFactorVariant1, optimize) {
  // Optimize the graph to recover the actual landmark position
  LevenbergMarquardtParams params;
  Values result = LevenbergMarquardtOptimizer(graph, values, params).optimize();

  // Vector6 actual = result.at<Vector6>(landmarkKey);
  // values.at<Pose3>(poseKey1).print("Pose1 Before:\n");
  // result.at<Pose3>(poseKey1).print("Pose1 After:\n");

@ -114,8 +115,11 @@ TEST( InvDepthFactorVariant1, optimize) {
  // However, since this is an over-parameterization, there can be
  // many 6D landmark values that equate to the same 3D world position
  // Instead, directly test the recovered 3D landmark position
  EXPECT(assert_equal(landmark, world_landmarkAfter, 1e-9));

  // Frank asks: why commented out?
  //Vector6 actual = result.at<Vector6>(landmarkKey);
  //EXPECT(assert_equal(expected, actual, 1e-9));
}