Merge remote-tracking branch 'upstream/develop' into develop

release/4.3a0
Jose Luis Blanco Claraco 2019-01-07 23:34:45 +01:00
commit 8513e4c230
35 changed files with 620 additions and 145 deletions


@ -1,6 +1,6 @@
project(GTSAM CXX C)
cmake_minimum_required(VERSION 2.6)
cmake_minimum_required(VERSION 2.8.4)
# new feature to Cmake Version > 2.8.12
# Mac ONLY. Define Relative Path on Mac OS
@ -69,6 +69,9 @@ option(GTSAM_ALLOW_DEPRECATED_SINCE_V4 "Allow use of methods/functions depreca
option(GTSAM_TYPEDEF_POINTS_TO_VECTORS "Typdef Point2 and Point3 to Eigen::Vector equivalents" OFF)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()
# Options relating to MATLAB wrapper
# TODO: Check for matlab mex binary before handling building of binaries
@ -193,6 +196,20 @@ endif()
# Find Google perftools
find_package(GooglePerfTools)
###############################################################################
# Support ccache, if installed
if(NOT MSVC AND NOT XCODE_VERSION)
find_program(CCACHE_FOUND ccache)
if(CCACHE_FOUND)
if(GTSAM_BUILD_WITH_CCACHE)
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
else()
set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "")
set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK "")
endif()
endif(CCACHE_FOUND)
endif()
###############################################################################
# Find MKL
@ -522,6 +539,15 @@ else()
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")
if(NOT MSVC AND NOT XCODE_VERSION)
if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
message(STATUS " Build with ccache : Yes")
elseif(CCACHE_FOUND)
message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
else()
message(STATUS " Build with ccache : No")
endif()
endif()
message(STATUS "Packaging flags ")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")

LICENSE (27 changes)

@ -1,18 +1,25 @@
GTSAM is released under the simplified BSD license, reproduced in the file
LICENSE.BSD in this directory.
GTSAM contains two third party libraries, with documentation of licensing and
modifications as follows:
GTSAM contains several third party libraries, with documentation of licensing
and modifications as follows:
- CCOLAMD 2.9.3: Tim Davis' constrained column approximate minimum degree
- CCOLAMD 2.9.6: Tim Davis' constrained column approximate minimum degree
ordering library
- Included unmodified in gtsam/3rdparty/CCOLAMD and gtsam/3rdparty/UFconfig
- Included unmodified in gtsam/3rdparty/CCOLAMD and
gtsam/3rdparty/SuiteSparse_config
- http://faculty.cse.tamu.edu/davis/suitesparse.html
- Licenced under BSD-3, provided in gtsam/3rdparty/CCOLAMD/Doc/License.txt
- Eigen 3.2: General C++ matrix and linear algebra library
- Modified with 3 patches that have been contributed back to the Eigen team:
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=704 (Householder QR MKL selection)
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=705 (Fix MKL LLT return code)
- http://eigen.tuxfamily.org/bz/show_bug.cgi?id=716 (Improved comma initialization)
- ceres: Google's nonlinear least-squares optimization library
- Includes only auto-diff/jet code, with minor modifications to includes
- http://ceres-solver.org/license.html
- Eigen 3.3.7: General C++ matrix and linear algebra library
- Licenced under MPL2, provided in gtsam/3rdparty/Eigen/COPYING.README
- Some code that is 3rd-party to Eigen is BSD and LGPL
- Some code that is 3rd-party to Eigen is BSD and LGPL
- GeographicLib 1.35: Charles Karney's geographic conversion utility library
- Included unmodified in gtsam/3rdparty/GeographicLib
- Licenced under MIT, provided in gtsam/3rdparty/GeographicLib/LICENSE.txt
- METIS 5.1.0: Graph partitioning and fill-reducing matrix ordering library
- Included unmodified in gtsam/3rdparty/metis
- Licenced under Apache License v 2.0, provided in
gtsam/3rdparty/metis/LICENSE.txt


@ -1,25 +1,32 @@
"""Pose3 unit tests."""
import math
import unittest
from gtsam import Point3, Rot3, Pose3
import numpy as np
from gtsam import Point3, Pose3, Rot3
class TestPose3(unittest.TestCase):
"""Test selected Pose3 methods."""
def test__between(self):
T2 = Pose3(Rot3.Rodrigues(0.3,0.2,0.1),Point3(3.5,-8.2,4.2))
def test_between(self):
"""Test between method."""
T2 = Pose3(Rot3.Rodrigues(0.3, 0.2, 0.1), Point3(3.5, -8.2, 4.2))
T3 = Pose3(Rot3.Rodrigues(-90, 0, 0), Point3(1, 2, 3))
expected = T2.inverse().compose(T3)
actual = T2.between(T3)
self.assertTrue(actual.equals(expected, 1e-6))
def test_transform_to(self):
transform = Pose3(Rot3.Rodrigues(0,0,-1.570796), Point3(2,4, 0))
actual = transform.transform_to(Point3(3,2,10))
expected = Point3 (2,1,10)
"""Test transform_to method."""
transform = Pose3(Rot3.Rodrigues(0, 0, -1.570796), Point3(2, 4, 0))
actual = transform.transform_to(Point3(3, 2, 10))
expected = Point3(2, 1, 10)
self.assertTrue(actual.equals(expected, 1e-6))
def test_range(self):
"""Test range method."""
l1 = Point3(1, 0, 0)
l2 = Point3(1, 1, 0)
x1 = Pose3()
@ -28,16 +35,23 @@ class TestPose3(unittest.TestCase):
xl2 = Pose3(Rot3.Ypr(0.0, 1.0, 0.0), Point3(1, 1, 0))
# establish range is indeed zero
self.assertEqual(1,x1.range(point=l1))
self.assertEqual(1, x1.range(point=l1))
# establish range is indeed sqrt2
self.assertEqual(math.sqrt(2.0),x1.range(point=l2))
self.assertEqual(math.sqrt(2.0), x1.range(point=l2))
# establish range is indeed zero
self.assertEqual(1,x1.range(pose=xl1))
self.assertEqual(1, x1.range(pose=xl1))
# establish range is indeed sqrt2
self.assertEqual(math.sqrt(2.0),x1.range(pose=xl2))
self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2))
def test_adjoint(self):
"""Test adjoint method."""
xi = np.array([1, 2, 3, 4, 5, 6])
expected = np.dot(Pose3.adjointMap(xi), xi)
actual = Pose3.adjoint(xi, xi)
np.testing.assert_array_equal(actual, expected)
if __name__ == "__main__":


@ -15,6 +15,7 @@
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
using namespace std;
@ -224,7 +225,7 @@ int main(int argc, char* argv[]) {
smartFactors[landmark]->add(StereoPoint2(xl, xr, y), X(frame), K);
} else {
throw runtime_error("unexpected data type: " + type);
throw runtime_error("unexpected data type: " + string(1, type));
}
lastFrame = frame;
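The one-character change above matters because adding a char to a string literal is pointer arithmetic on a const char*, not concatenation; wrapping the character in std::string(1, type) turns it into a proper append. A minimal illustration (not part of the diff, the variable name is illustrative):

    #include <iostream>
    #include <string>

    int main() {
      char type = 'X';
      // "..." + type would offset the literal by the character code of 'X'
      // (far past the end of the literal), so the old expression produced
      // garbage instead of an error message.
      std::string msg = "unexpected data type: " + std::string(1, type);  // correct
      std::cout << msg << std::endl;
      return 0;
    }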


@ -0,0 +1,84 @@
/**
* @file Pose2SLAMStressTest.cpp
* @brief Test GTSAM on large open-loop chains
* @date May 23, 2018
* @author Wenqiang Zhou
*/
// Create N 3D poses and add relative motion between each pair of consecutive poses. (The
// relative motion is simply a unit translation (1, 0, 0), no rotation.) For each
// pose, add some random noise to the x value of the translation part.
// Use gtsam to create a prior factor for the first pose and N-1 between factors
// and run optimization.
#include <gtsam/geometry/Cal3_S2Stereo.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/StereoFactor.h>
#include <random>
using namespace std;
using namespace gtsam;
void testGtsam(int numberNodes) {
std::random_device rd;
std::mt19937 e2(rd());
std::uniform_real_distribution<> dist(0, 1);
vector<Pose3> poses;
for (int i = 0; i < numberNodes; ++i) {
Matrix4 M;
double r = dist(e2);
r = (r - 0.5) / 10 + i;
M << 1, 0, 0, r, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;
poses.push_back(Pose3(M));
}
// prior factor for the first pose
auto priorModel = noiseModel::Isotropic::Variance(6, 1e-4);
Matrix4 first_M;
first_M << 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;
Pose3 first = Pose3(first_M);
NonlinearFactorGraph graph;
graph.add(PriorFactor<Pose3>(0, first, priorModel));
// vo noise model
auto VOCovarianceModel = noiseModel::Isotropic::Variance(6, 1e-3);
// relative VO motion
Matrix4 vo_M;
vo_M << 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1;
Pose3 relativeMotion(vo_M);
for (int i = 0; i < numberNodes - 1; ++i) {
graph.add(
BetweenFactor<Pose3>(i, i + 1, relativeMotion, VOCovarianceModel));
}
// initial values
Values initial;
for (int i = 0; i < numberNodes; ++i) {
initial.insert(i, poses[i]);
}
LevenbergMarquardtParams params;
params.setVerbosity("ERROR");
params.setOrderingType("METIS");
params.setLinearSolverType("MULTIFRONTAL_CHOLESKY");
LevenbergMarquardtOptimizer optimizer(graph, initial, params);
auto result = optimizer.optimize();
}
int main(int argc, char* argv[]) {
int numberNodes = stoi(argv[1]);  // expects the node count as the first argument
cout << "number of nodes: " << numberNodes << endl;
testGtsam(numberNodes);
return 0;
}


@ -0,0 +1,102 @@
/**
* @file Pose3SLAMExampleExpressions_BearingRangeWithTransform.cpp
* @brief A simultaneous optimization of trajectory, landmarks and sensor-pose with respect to body-pose using bearing-range measurements done with Expressions
* @author Thomas Horstink
* @date January 4th, 2019
*/
#include <gtsam/inference/Symbol.h>
#include <gtsam/geometry/BearingRange.h>
#include <gtsam/slam/expressions.h>
#include <gtsam/nonlinear/ExpressionFactorGraph.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/Values.h>
#include <examples/SFMdata.h>
using namespace gtsam;
typedef BearingRange<Pose3, Point3> BearingRange3D;
/* ************************************************************************* */
int main(int argc, char* argv[]) {
// Move around so the whole state (including the sensor tf) is observable
Pose3 init_pose = Pose3();
Pose3 delta_pose1 = Pose3(Rot3().Yaw(2*M_PI/8).Pitch(M_PI/8), Point3(1, 0, 0));
Pose3 delta_pose2 = Pose3(Rot3().Pitch(-M_PI/8), Point3(1, 0, 0));
Pose3 delta_pose3 = Pose3(Rot3().Yaw(-2*M_PI/8), Point3(1, 0, 0));
int steps = 4;
auto poses = createPoses(init_pose, delta_pose1, steps);
auto poses2 = createPoses(init_pose, delta_pose2, steps);
auto poses3 = createPoses(init_pose, delta_pose3, steps);
// Concatenate poses to create trajectory
poses.insert( poses.end(), poses2.begin(), poses2.end() );
poses.insert( poses.end(), poses3.begin(), poses3.end() ); // std::vector of Pose3
auto points = createPoints(); // std::vector of Point3
// (ground-truth) sensor pose in the body frame, which is also an unknown variable to be estimated
Pose3 body_T_sensor_gt(Rot3::RzRyRx(-M_PI_2, 0.0, -M_PI_2), Point3(0.25, -0.10, 1.0));
// The graph
ExpressionFactorGraph graph;
// Specify uncertainty on first pose prior and also for between factor (simplicity reasons)
auto poseNoise = noiseModel::Diagonal::Sigmas((Vector(6)<<0.3,0.3,0.3,0.1,0.1,0.1).finished());
// Uncertainty bearing range measurement;
auto bearingRangeNoise = noiseModel::Diagonal::Sigmas((Vector(3)<<0.01,0.03,0.05).finished());
// Expressions for body-frame at key 0 and sensor-tf
Pose3_ x_('x', 0);
Pose3_ body_T_sensor_('T', 0);
// Add a prior on the body-pose
graph.addExpressionFactor(x_, poses[0], poseNoise);
// Simulated measurements from pose
for (size_t i = 0; i < poses.size(); ++i) {
auto world_T_sensor = poses[i].compose(body_T_sensor_gt);
for (size_t j = 0; j < points.size(); ++j) {
// This expression is the key feature of this example: it creates a differentiable expression of the measurement after being displaced by sensor transform.
auto prediction_ = Expression<BearingRange3D>( BearingRange3D::Measure, Pose3_('x',i)*body_T_sensor_, Point3_('l',j));
// Create a *perfect* measurement
auto measurement = BearingRange3D(world_T_sensor.bearing(points[j]), world_T_sensor.range(points[j]));
// Add factor
graph.addExpressionFactor(prediction_, measurement, bearingRangeNoise);
}
// and add a between factor to the graph
if (i > 0)
{
// And also we have a *perfect* measurement for the between factor.
graph.addExpressionFactor(between(Pose3_('x', i-1),Pose3_('x', i)), poses[i-1].between(poses[i]), poseNoise);
}
}
// Create perturbed initial
Values initial;
Pose3 delta(Rot3::Rodrigues(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20));
for (size_t i = 0; i < poses.size(); ++i)
initial.insert(Symbol('x', i), poses[i].compose(delta));
for (size_t j = 0; j < points.size(); ++j)
initial.insert<Point3>(Symbol('l', j), points[j] + Point3(-0.25, 0.20, 0.15));
// Initialize body_T_sensor wrongly (because we do not know!)
initial.insert<Pose3>(Symbol('T',0), Pose3());
std::cout << "initial error: " << graph.error(initial) << std::endl;
Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
std::cout << "final error: " << graph.error(result) << std::endl;
initial.at<Pose3>(Symbol('T',0)).print("\nInitial estimate body_T_sensor\n"); /* initial sensor_P_body estimate */
result.at<Pose3>(Symbol('T',0)).print("\nFinal estimate body_T_sensor\n"); /* optimized sensor_P_body estimate */
body_T_sensor_gt.print("\nGround truth body_T_sensor\n"); /* sensor_P_body ground truth */
return 0;
}
/* ************************************************************************* */


@ -16,9 +16,10 @@
*/
/**
* A structure-from-motion example with landmarks
* A structure-from-motion example with landmarks, default function arguments give
* - The landmarks form a 10 meter cube
* - The robot rotates around the landmarks, always facing towards the cube
Passing function arguments allows specifying an initial position, a pose increment and a step count.
*/
// As this is a full 3D problem, we will use Pose3 variables to represent the camera
@ -49,20 +50,19 @@ std::vector<gtsam::Point3> createPoints() {
}
/* ************************************************************************* */
std::vector<gtsam::Pose3> createPoses() {
std::vector<gtsam::Pose3> createPoses(
const gtsam::Pose3& init = gtsam::Pose3(gtsam::Rot3::Ypr(M_PI/2,0,-M_PI/2), gtsam::Point3(30, 0, 0)),
const gtsam::Pose3& delta = gtsam::Pose3(gtsam::Rot3::Ypr(0,-M_PI/4,0), gtsam::Point3(sin(M_PI/4)*30, 0, 30*(1-sin(M_PI/4)))),
int steps = 8) {
// Create the set of ground-truth poses
// Default values give a circular trajectory, radius 30 at pi/4 intervals, always facing the circle center
std::vector<gtsam::Pose3> poses;
double radius = 30.0;
int i = 0;
double theta = 0.0;
gtsam::Point3 up(0,0,1);
gtsam::Point3 target(0,0,0);
for(; i < 8; ++i, theta += 2*M_PI/8) {
gtsam::Point3 position = gtsam::Point3(radius*cos(theta), radius*sin(theta), 0.0);
gtsam::SimpleCamera camera = gtsam::SimpleCamera::Lookat(position, target, up);
poses.push_back(camera.pose());
int i = 1;
poses.push_back(init);
for(; i < steps; ++i) {
poses.push_back(poses[i-1].compose(delta));
}
return poses;
}
/* ************************************************************************* */
}
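A minimal sketch (not from the diff) of calling the new createPoses() overload with explicit arguments, as the bearing-range example above does; the straight-line trajectory here is purely illustrative:

    #include <examples/SFMdata.h>
    #include <gtsam/geometry/Pose3.h>

    int main() {
      gtsam::Pose3 init;                                          // start at the origin
      gtsam::Pose3 delta(gtsam::Rot3(), gtsam::Point3(1, 0, 0));  // step one meter along x
      std::vector<gtsam::Pose3> poses = createPoses(init, delta, 5);
      poses.front().print("first pose\n");
      poses.back().print("last pose\n");
      return 0;
    }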

gtsam.h (32 changes)

@ -228,6 +228,12 @@ virtual class Value {
size_t dim() const;
};
#include <gtsam/base/GenericValue.h>
template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::StereoPoint2, gtsam::Cal3_S2,gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
virtual class GenericValue : gtsam::Value {
void serializable() const;
};
#include <gtsam/base/deprecated/LieScalar.h>
class LieScalar {
// Standard constructors
@ -567,8 +573,13 @@ class Pose2 {
// Lie Group
static gtsam::Pose2 Expmap(Vector v);
static Vector Logmap(const gtsam::Pose2& p);
static Matrix ExpmapDerivative(Vector v);
static Matrix LogmapDerivative(const gtsam::Pose2& v);
Matrix AdjointMap() const;
Vector Adjoint(Vector xi) const;
static Matrix adjointMap(Vector v);
Vector adjoint(Vector xi, Vector y);
Vector adjointTranspose(Vector xi, Vector y);
static Matrix wedge(double vx, double vy, double w);
// Group Actions on Point2
@ -617,6 +628,11 @@ class Pose3 {
static Vector Logmap(const gtsam::Pose3& pose);
Matrix AdjointMap() const;
Vector Adjoint(Vector xi) const;
static Matrix adjointMap(Vector xi);
static Vector adjoint(Vector xi, Vector y);
static Vector adjointTranspose(Vector xi, Vector y);
static Matrix ExpmapDerivative(Vector xi);
static Matrix LogmapDerivative(const gtsam::Pose3& xi);
static Matrix wedge(double wx, double wy, double wz, double vx, double vy, double vz);
// Group Action on Point3
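A small sketch (not part of the commit) of the identity the new Pose3 adjoint entries expose, mirroring the Python test added above: adjoint(xi, y) equals adjointMap(xi) * y.

    #include <gtsam/geometry/Pose3.h>
    #include <iostream>

    int main() {
      gtsam::Vector6 xi;
      xi << 1, 2, 3, 4, 5, 6;
      gtsam::Matrix6 ad = gtsam::Pose3::adjointMap(xi);
      gtsam::Vector6 expected = ad * xi;                    // apply the adjoint map
      gtsam::Vector6 actual = gtsam::Pose3::adjoint(xi, xi);  // same action, computed directly
      std::cout << "difference: " << (actual - expected).norm() << std::endl;  // ~0
      return 0;
    }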
@ -2259,10 +2275,13 @@ virtual class NonlinearEquality : gtsam::NoiseModelFactor {
template<POSE, POINT>
virtual class RangeFactor : gtsam::NoiseModelFactor {
RangeFactor(size_t key1, size_t key2, double measured, const gtsam::noiseModel::Base* noiseModel);
// enabling serialization functionality
void serialize() const;
};
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactorPosePoint2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Point3> RangeFactorPosePoint3;
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactor2D;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Point3> RangeFactor3D;
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Pose2> RangeFactorPose2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Pose3> RangeFactorPose3;
typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::Point3> RangeFactorCalibratedCameraPoint;
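The rename affects the typedef names used by the wrapper and the serialization exports; in C++ the class itself is still RangeFactor<Pose2, Point2>. A hedged usage sketch (keys and noise values are illustrative):

    #include <gtsam/sam/RangeFactor.h>
    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/inference/Symbol.h>

    int main() {
      using gtsam::symbol_shorthand::L;
      using gtsam::symbol_shorthand::X;
      auto noise = gtsam::noiseModel::Isotropic::Sigma(1, 0.2);
      // The class behind the renamed RangeFactor2D typedef:
      gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> factor(X(1), L(1), 2.0, noise);
      factor.print("range factor:\n");
      return 0;
    }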
@ -2275,10 +2294,13 @@ typedef gtsam::RangeFactor<gtsam::SimpleCamera, gtsam::SimpleCamera> RangeFactor
template<POSE, POINT>
virtual class RangeFactorWithTransform : gtsam::NoiseModelFactor {
RangeFactorWithTransform(size_t key1, size_t key2, double measured, const gtsam::noiseModel::Base* noiseModel, const POSE& body_T_sensor);
// enabling serialization functionality
void serialize() const;
};
typedef gtsam::RangeFactorWithTransform<gtsam::Pose2, gtsam::Point2> RangeFactorWithTransformPosePoint2;
typedef gtsam::RangeFactorWithTransform<gtsam::Pose3, gtsam::Point3> RangeFactorWithTransformPosePoint3;
typedef gtsam::RangeFactorWithTransform<gtsam::Pose2, gtsam::Point2> RangeFactorWithTransform2D;
typedef gtsam::RangeFactorWithTransform<gtsam::Pose3, gtsam::Point3> RangeFactorWithTransform3D;
typedef gtsam::RangeFactorWithTransform<gtsam::Pose2, gtsam::Pose2> RangeFactorWithTransformPose2;
typedef gtsam::RangeFactorWithTransform<gtsam::Pose3, gtsam::Pose3> RangeFactorWithTransformPose3;
@ -2292,6 +2314,7 @@ virtual class BearingFactor : gtsam::NoiseModelFactor {
};
typedef gtsam::BearingFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2> BearingFactor2D;
typedef gtsam::BearingFactor<gtsam::Pose2, gtsam::Pose2, gtsam::Rot2> BearingFactorPose2;
#include <gtsam/sam/BearingRangeFactor.h>
template<POSE, POINT, BEARING, RANGE>
@ -2305,6 +2328,7 @@ virtual class BearingRangeFactor : gtsam::NoiseModelFactor {
};
typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Point2, gtsam::Rot2, double> BearingRangeFactor2D;
typedef gtsam::BearingRangeFactor<gtsam::Pose2, gtsam::Pose2, gtsam::Rot2, double> BearingRangeFactorPose2;
#include <gtsam/slam/ProjectionFactor.h>


@ -164,11 +164,11 @@ public:
protected:
// implicit assignment operator for (const GenericValue& rhs) works fine here
/// Assignment operator, protected because only the Value or DERIVED
/// assignment operators should be used.
GenericValue<T>& operator=(const GenericValue<T>& rhs) {
// Nothing to do, do not call base class assignment operator
Value::operator=(static_cast<Value const&>(rhs));
value_ = rhs.value_;
return *this;
}


@ -24,7 +24,6 @@
#include <string>
// includes for standard serialization types
#include <boost/serialization/export.hpp>
#include <boost/serialization/optional.hpp>
#include <boost/serialization/shared_ptr.hpp>
#include <boost/serialization/vector.hpp>
@ -39,6 +38,7 @@
#include <boost/archive/xml_oarchive.hpp>
#include <boost/archive/binary_iarchive.hpp>
#include <boost/archive/binary_oarchive.hpp>
#include <boost/serialization/export.hpp>
namespace gtsam {
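The reordering here (and the matching change in the wrap-generated files near the end of this diff) is presumably because BOOST_CLASS_EXPORT only instantiates serialization code for archive types whose headers have already been included, so export.hpp and any export macros should come after the archive headers. A minimal illustration of the required ordering (an assumption, not part of the diff):

    // Archive headers first ...
    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>
    // ... then export.hpp, so that any export macros placed below are
    // instantiated for the archive types included above.
    #include <boost/serialization/export.hpp>
    #include <gtsam/linear/NoiseModel.h>
    // e.g. BOOST_CLASS_EXPORT_GUID(SomeDerivedType, "SomeDerivedType")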


@ -132,6 +132,8 @@ public:
* Ad_pose is 3*3 matrix that when applied to twist xi \f$ [T_x,T_y,\theta] \f$, returns Ad_pose(xi)
*/
Matrix3 AdjointMap() const;
/// Apply AdjointMap to twist xi
inline Vector3 Adjoint(const Vector3& xi) const {
return AdjointMap()*xi;
}
@ -141,6 +143,20 @@ public:
*/
static Matrix3 adjointMap(const Vector3& v);
/**
* Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
*/
Vector3 adjoint(const Vector3& xi, const Vector3& y) {
return adjointMap(xi) * y;
}
/**
* The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
*/
Vector3 adjointTranspose(const Vector3& xi, const Vector3& y) {
return adjointMap(xi).transpose() * y;
}
/**
* wedge for SE(2):
* @param xi 3-dim twist (v,omega) where


@ -27,6 +27,8 @@
#include <gtsam/linear/NoiseModel.h>
#include <iosfwd>
#include <string>
#include <utility>
namespace gtsam {
@ -61,7 +63,6 @@ class GTSAM_EXPORT PreintegrationBase {
typedef PreintegrationParams Params;
protected:
/// Parameters. Declared mutable only for deprecated predict method.
/// TODO(frank): make const once deprecated method is removed
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V4
@ -78,7 +79,10 @@ class GTSAM_EXPORT PreintegrationBase {
/// Default constructor for serialization
PreintegrationBase() {}
public:
/// Virtual destructor for serialization
virtual ~PreintegrationBase() {}
public:
/// @name Constructors
/// @{
@ -95,7 +99,7 @@ public:
/// @name Basic utilities
/// @{
/// Re-initialize PreintegratedMeasurements
virtual void resetIntegration()=0;
virtual void resetIntegration() = 0;
/// @name Basic utilities
/// @{
@ -129,10 +133,10 @@ public:
const imuBias::ConstantBias& biasHat() const { return biasHat_; }
double deltaTij() const { return deltaTij_; }
virtual Vector3 deltaPij() const=0;
virtual Vector3 deltaVij() const=0;
virtual Rot3 deltaRij() const=0;
virtual NavState deltaXij() const=0;
virtual Vector3 deltaPij() const = 0;
virtual Vector3 deltaVij() const = 0;
virtual Rot3 deltaRij() const = 0;
virtual NavState deltaXij() const = 0;
// Exposed for MATLAB
Vector6 biasHatVector() const { return biasHat_.vector(); }
@ -147,20 +151,24 @@ public:
/// @name Main functionality
/// @{
/// Subtract estimate and correct for sensor pose
/// Compute the derivatives due to non-identity body_P_sensor (rotation and centrifugal acc)
/// Ignore D_correctedOmega_measuredAcc as it is trivially zero
/**
* Subtract estimate and correct for sensor pose
* Compute the derivatives due to non-identity body_P_sensor (rotation and centrifugal acc)
* Ignore D_correctedOmega_measuredAcc as it is trivially zero
*/
std::pair<Vector3, Vector3> correctMeasurementsBySensorPose(
const Vector3& unbiasedAcc, const Vector3& unbiasedOmega,
OptionalJacobian<3, 3> correctedAcc_H_unbiasedAcc = boost::none,
OptionalJacobian<3, 3> correctedAcc_H_unbiasedOmega = boost::none,
OptionalJacobian<3, 3> correctedOmega_H_unbiasedOmega = boost::none) const;
/// Update preintegrated measurements and get derivatives
/// It takes measured quantities in the j frame
/// Modifies preintegrated quantities in place after correcting for bias and possibly sensor pose
/**
* Update preintegrated measurements and get derivatives
* It takes measured quantities in the j frame
* Modifies preintegrated quantities in place after correcting for bias and possibly sensor pose
*/
virtual void update(const Vector3& measuredAcc, const Vector3& measuredOmega,
const double dt, Matrix9* A, Matrix93* B, Matrix93* C)=0;
const double dt, Matrix9* A, Matrix93* B, Matrix93* C) = 0;
/// Version without derivatives
virtual void integrateMeasurement(const Vector3& measuredAcc,
@ -169,7 +177,7 @@ public:
/// Given the estimate of the bias, return a NavState tangent vector
/// summarizing the preintegrated IMU measurements so far
virtual Vector9 biasCorrectedDelta(const imuBias::ConstantBias& bias_i,
OptionalJacobian<9, 6> H = boost::none) const=0;
OptionalJacobian<9, 6> H = boost::none) const = 0;
/// Predict state at time j
NavState predict(const NavState& state_i, const imuBias::ConstantBias& bias_i,
@ -182,7 +190,10 @@ public:
OptionalJacobian<9, 9> H1, OptionalJacobian<9, 9> H2,
OptionalJacobian<9, 6> H3) const;
/// Compute errors w.r.t. preintegrated measurements and jacobians wrt pose_i, vel_i, bias_i, pose_j, bias_j
/**
* Compute errors w.r.t. preintegrated measurements and jacobians
* wrt pose_i, vel_i, bias_i, pose_j, bias_j
*/
Vector9 computeErrorAndJacobians(const Pose3& pose_i, const Vector3& vel_i,
const Pose3& pose_j, const Vector3& vel_j,
const imuBias::ConstantBias& bias_i, OptionalJacobian<9, 6> H1 =
@ -202,8 +213,8 @@ public:
/// @}
#endif
public:
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
} /// namespace gtsam
} /// namespace gtsam
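A brief, hedged sketch (not from the diff) of the concrete side of this interface, using PreintegratedImuMeasurements; the parameter values and synthetic samples are illustrative only:

    #include <gtsam/navigation/ImuFactor.h>
    #include <iostream>

    int main() {
      auto params = gtsam::PreintegrationParams::MakeSharedU(9.81);  // gravity along -z
      params->accelerometerCovariance = gtsam::I_3x3 * 1e-4;
      params->gyroscopeCovariance = gtsam::I_3x3 * 1e-6;
      params->integrationCovariance = gtsam::I_3x3 * 1e-8;
      gtsam::PreintegratedImuMeasurements pim(params);
      const double dt = 0.01;
      for (int k = 0; k < 100; ++k)  // one second of synthetic measurements
        pim.integrateMeasurement(gtsam::Vector3(0.1, 0.0, 9.81), gtsam::Vector3(0.0, 0.0, 0.2), dt);
      std::cout << "deltaTij = " << pim.deltaTij() << std::endl;
      pim.deltaXij().print("preintegrated delta\n");
      return 0;
    }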


@ -24,12 +24,15 @@ namespace gtsam {
/// Simple trajectory simulator.
class Scenario {
public:
/// virtual destructor
virtual ~Scenario() {}
// Quantities a Scenario needs to specify:
virtual Pose3 pose(double t) const = 0;
virtual Vector3 omega_b(double t) const = 0;
virtual Vector3 velocity_n(double t) const = 0;
virtual Vector3 acceleration_n(double t) const = 0;
virtual Pose3 pose(double t) const = 0; ///< pose at time t
virtual Vector3 omega_b(double t) const = 0; ///< angular velocity in body frame
virtual Vector3 velocity_n(double t) const = 0; ///< velocity at time t, in nav frame
virtual Vector3 acceleration_n(double t) const = 0; ///< acceleration in nav frame
// Derived quantities:
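For context, a minimal sketch (not in the diff) of querying one of the concrete scenarios that implement this interface; the constant-twist circle below is illustrative:

    #include <gtsam/navigation/Scenario.h>
    #include <iostream>

    int main() {
      const double radius = 30.0, speed = 2.0;
      const gtsam::Vector3 omega_b(0.0, 0.0, speed / radius);  // constant yaw rate
      const gtsam::Vector3 velocity_b(speed, 0.0, 0.0);        // forward velocity in the body frame
      gtsam::ConstantTwistScenario scenario(omega_b, velocity_b);
      scenario.pose(10.0).print("pose at t = 10 s\n");
      std::cout << "speed in nav frame: " << scenario.velocity_n(10.0).norm() << std::endl;
      return 0;
    }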


@ -266,6 +266,13 @@ class ExpressionFactor2 : public ExpressionFactor<T> {
virtual Expression<T> expression() const {
return expression(this->keys_[0], this->keys_[1]);
}
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& boost::serialization::make_nvp(
"ExpressionFactor", boost::serialization::base_object<ExpressionFactor<T> >(*this));
}
};
// ExpressionFactor2


@ -385,6 +385,17 @@ namespace gtsam {
ConstFiltered<ValueType>
filter(const boost::function<bool(Key)>& filterFcn = &_truePredicate<Key>) const;
// Count values of given type \c ValueType
template<class ValueType>
size_t count() const {
size_t i = 0;
for (const auto& key_value : *this) {
if (dynamic_cast<const GenericValue<ValueType>*>(&key_value.value))
++i;
}
return i;
}
private:
// Filters based on ValueType (if not Value) and also based on the user-
// supplied \c filter function.
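A short usage sketch of the new count<ValueType>() (not part of the diff; keys and types are illustrative), matching the check added to the Values unit test below:

    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/geometry/Pose2.h>
    #include <gtsam/geometry/Pose3.h>
    #include <iostream>

    int main() {
      gtsam::Values values;
      values.insert(0, gtsam::Pose2(1.0, 2.0, 0.3));
      values.insert(1, gtsam::Pose2());
      values.insert(2, gtsam::Pose3());
      std::cout << "Pose2 values: " << values.count<gtsam::Pose2>() << std::endl;  // 2
      std::cout << "Pose3 values: " << values.count<gtsam::Pose3>() << std::endl;  // 1
      return 0;
    }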


@ -383,6 +383,8 @@ TEST(Values, filter) {
++ i;
}
EXPECT_LONGS_EQUAL(2, (long)i);
EXPECT_LONGS_EQUAL(2, (long)values.count<Pose3>());
EXPECT_LONGS_EQUAL(2, (long)values.count<Pose2>());
// construct a values with the view
Values actualSubValues2(pose_filtered);


@ -60,6 +60,14 @@ struct BearingFactor : public ExpressionFactor2<T, A1, A2> {
std::cout << s << "BearingFactor" << std::endl;
Base::print(s, kf);
}
private:
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& boost::serialization::make_nvp(
"Base", boost::serialization::base_object<Base>(*this));
}
}; // BearingFactor
/// traits


@ -73,6 +73,14 @@ class BearingRangeFactor
Base::print(s, kf);
}
private:
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& boost::serialization::make_nvp(
"Base", boost::serialization::base_object<Base>(*this));
}
}; // BearingRangeFactor
/// traits


@ -65,6 +65,14 @@ class RangeFactor : public ExpressionFactor2<T, A1, A2> {
std::cout << s << "RangeFactor" << std::endl;
Base::print(s, kf);
}
private:
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& boost::serialization::make_nvp(
"Base", boost::serialization::base_object<Base>(*this));
}
}; // \ RangeFactor
/// traits
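A hedged sketch (not from the diff) of what the new serialize() members enable: round-tripping a range factor through a Boost text archive. It assumes the derived noise model is exported after the archive headers, per the include-ordering change noted earlier; the GUID string and key values are illustrative.

    #include <gtsam/sam/RangeFactor.h>
    #include <gtsam/geometry/Pose2.h>
    #include <boost/archive/text_iarchive.hpp>
    #include <boost/archive/text_oarchive.hpp>
    #include <boost/serialization/export.hpp>
    #include <iostream>
    #include <sstream>

    // Register the noise model so it can be restored through its base-class pointer.
    BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")

    int main() {
      typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactor2D;
      auto noise = gtsam::noiseModel::Isotropic::Sigma(1, 0.2);
      RangeFactor2D factor(1, 2, 3.0, noise);

      std::stringstream ss;
      {
        boost::archive::text_oarchive oa(ss);
        oa << factor;  // write the factor
      }
      RangeFactor2D restored(0, 0, 0.0, noise);  // placeholder, overwritten on load
      {
        boost::archive::text_iarchive ia(ss);
        ia >> restored;  // read it back
      }
      std::cout << (factor.equals(restored) ? "round trip OK" : "mismatch") << std::endl;
      return 0;
    }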


@ -432,23 +432,27 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate,
fstream stream(filename.c_str(), fstream::out);
// save 2D & 3D poses
Values::ConstFiltered<Pose2> viewPose2 = estimate.filter<Pose2>();
for(const Values::ConstFiltered<Pose2>::KeyValuePair& key_value: viewPose2) {
stream << "VERTEX_SE2 " << key_value.key << " " << key_value.value.x() << " "
<< key_value.value.y() << " " << key_value.value.theta() << endl;
for (const auto& key_value : estimate) {
auto p = dynamic_cast<const GenericValue<Pose2>*>(&key_value.value);
if (!p) continue;
const Pose2& pose = p->value();
stream << "VERTEX_SE2 " << key_value.key << " " << pose.x() << " "
<< pose.y() << " " << pose.theta() << endl;
}
Values::ConstFiltered<Pose3> viewPose3 = estimate.filter<Pose3>();
for(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value: viewPose3) {
Point3 p = key_value.value.translation();
Rot3 R = key_value.value.rotation();
stream << "VERTEX_SE3:QUAT " << key_value.key << " " << p.x() << " " << p.y() << " " << p.z()
for(const auto& key_value: estimate) {
auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
if (!p) continue;
const Pose3& pose = p->value();
Point3 t = pose.translation();
Rot3 R = pose.rotation();
stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " " << t.y() << " " << t.z()
<< " " << R.toQuaternion().x() << " " << R.toQuaternion().y() << " " << R.toQuaternion().z()
<< " " << R.toQuaternion().w() << endl;
}
// save edges (2D or 3D)
for(boost::shared_ptr<NonlinearFactor> factor_: graph) {
for(const auto& factor_: graph) {
boost::shared_ptr<BetweenFactor<Pose2> > factor =
boost::dynamic_pointer_cast<BetweenFactor<Pose2> >(factor_);
if (factor){
@ -857,48 +861,47 @@ bool writeBAL(const string& filename, SfM_data &data) {
bool writeBALfromValues(const string& filename, const SfM_data &data,
Values& values) {
using Camera = PinholeCamera<Cal3Bundler>;
SfM_data dataValues = data;
// Store poses or cameras in SfM_data
Values valuesPoses = values.filter<Pose3>();
if (valuesPoses.size() == dataValues.number_cameras()) { // we only estimated camera poses
size_t nrPoses = values.count<Pose3>();
if (nrPoses == dataValues.number_cameras()) { // we only estimated camera poses
for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera
Key poseKey = symbol('x', i);
Pose3 pose = values.at<Pose3>(poseKey);
Cal3Bundler K = dataValues.cameras[i].calibration();
PinholeCamera<Cal3Bundler> camera(pose, K);
Camera camera(pose, K);
dataValues.cameras[i] = camera;
}
} else {
Values valuesCameras = values.filter<PinholeCamera<Cal3Bundler> >();
if (valuesCameras.size() == dataValues.number_cameras()) { // we only estimated camera poses and calibration
for (size_t i = 0; i < dataValues.number_cameras(); i++) { // for each camera
size_t nrCameras = values.count<Camera>();
if (nrCameras == dataValues.number_cameras()) { // we only estimated camera poses and calibration
for (size_t i = 0; i < nrCameras; i++) { // for each camera
Key cameraKey = i; // symbol('c',i);
PinholeCamera<Cal3Bundler> camera =
values.at<PinholeCamera<Cal3Bundler> >(cameraKey);
Camera camera = values.at<Camera>(cameraKey);
dataValues.cameras[i] = camera;
}
} else {
cout
<< "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras= "
<< "writeBALfromValues: different number of cameras in SfM_dataValues (#cameras "
<< dataValues.number_cameras() << ") and values (#cameras "
<< valuesPoses.size() << ", #poses " << valuesCameras.size() << ")!!"
<< nrPoses << ", #poses " << nrCameras << ")!!"
<< endl;
return false;
}
}
// Store 3D points in SfM_data
Values valuesPoints = values.filter<Point3>();
if (valuesPoints.size() != dataValues.number_tracks()) {
size_t nrPoints = values.count<Point3>(), nrTracks = dataValues.number_tracks();
if (nrPoints != nrTracks) {
cout
<< "writeBALfromValues: different number of points in SfM_dataValues (#points= "
<< dataValues.number_tracks() << ") and values (#points "
<< valuesPoints.size() << ")!!" << endl;
<< nrTracks << ") and values (#points "
<< nrPoints << ")!!" << endl;
}
for (size_t j = 0; j < dataValues.number_tracks(); j++) { // for each point
for (size_t j = 0; j < nrTracks; j++) { // for each point
Key pointKey = P(j);
if (values.exists(pointKey)) {
Point3 point = values.at<Point3>(pointKey);


@ -47,15 +47,15 @@ namespace gtsam { namespace partition {
toErase.push_back(itFactor); nrFactors--; continue;
}
size_t label1 = dsf.findSet(key1.index);
size_t label2 = dsf.findSet(key2.index);
size_t label1 = dsf.find(key1.index);
size_t label2 = dsf.find(key2.index);
if (label1 == label2) { toErase.push_back(itFactor); nrFactors--; continue; }
// merge two trees if the connection is strong enough, otherwise cache it
// an odometry factor always merges two islands
if (key1.type == NODE_POSE_2D && key2.type == NODE_POSE_2D) {
toErase.push_back(itFactor); nrFactors--;
dsf.makeUnionInPlace(label1, label2);
dsf.merge(label1, label2);
succeed = true;
break;
}
@ -64,7 +64,7 @@ namespace gtsam { namespace partition {
if ((dsf.isSingleton(label1)==1 && key1.type == NODE_LANDMARK_2D) ||
(dsf.isSingleton(label2)==1 && key2.type == NODE_LANDMARK_2D)) {
toErase.push_back(itFactor); nrFactors--;
dsf.makeUnionInPlace(label1, label2);
dsf.merge(label1, label2);
succeed = true;
break;
}
@ -87,7 +87,7 @@ namespace gtsam { namespace partition {
} else {
toErase.push_back(itFactor); nrFactors--;
toErase.push_back(itCached->second); nrFactors--;
dsf.makeUnionInPlace(label1, label2);
dsf.merge(label1, label2);
connections.erase(itCached);
succeed = true;
break;
@ -150,8 +150,8 @@ namespace gtsam { namespace partition {
}
if (graph.size() == 178765) cout << "kai22" << endl;
size_t label1 = dsf.findSet(key1.index);
size_t label2 = dsf.findSet(key2.index);
size_t label1 = dsf.find(key1.index);
size_t label2 = dsf.find(key2.index);
if (label1 == label2) { toErase.push_back(itFactor); nrFactors--; continue; }
if (graph.size() == 178765) cout << "kai23" << endl;
@ -160,7 +160,7 @@ namespace gtsam { namespace partition {
if ((key1.type == NODE_POSE_3D && key2.type == NODE_LANDMARK_3D) ||
(key1.type == NODE_POSE_3D && key2.type == NODE_POSE_3D)) {
toErase.push_back(itFactor); nrFactors--;
dsf.makeUnionInPlace(label1, label2);
dsf.merge(label1, label2);
succeed = true;
break;
}


@ -70,8 +70,8 @@ typedef NonlinearEquality<CalibratedCamera> NonlinearEqualityCalibratedCamera;
typedef NonlinearEquality<SimpleCamera> NonlinearEqualitySimpleCamera;
typedef NonlinearEquality<StereoCamera> NonlinearEqualityStereoCamera;
typedef RangeFactor<Pose2, Point2> RangeFactorPosePoint2;
typedef RangeFactor<Pose3, Point3> RangeFactorPosePoint3;
typedef RangeFactor<Pose2, Point2> RangeFactor2D;
typedef RangeFactor<Pose3, Point3> RangeFactor3D;
typedef RangeFactor<Pose2, Pose2> RangeFactorPose2;
typedef RangeFactor<Pose3, Pose3> RangeFactorPose3;
typedef RangeFactor<CalibratedCamera, Point3> RangeFactorCalibratedCameraPoint;
@ -172,8 +172,8 @@ BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqua
BOOST_CLASS_EXPORT_GUID(NonlinearEqualitySimpleCamera, "gtsam::NonlinearEqualitySimpleCamera");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera");
BOOST_CLASS_EXPORT_GUID(RangeFactorPosePoint2, "gtsam::RangeFactorPosePoint2");
BOOST_CLASS_EXPORT_GUID(RangeFactorPosePoint3, "gtsam::RangeFactorPosePoint3");
BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D");
BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3");
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint");


@ -2258,8 +2258,8 @@ virtual class RangeFactor : gtsam::NoiseModelFactor {
RangeFactor(size_t key1, size_t key2, double measured, const gtsam::noiseModel::Base* noiseModel);
};
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactorPosePoint2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Point3> RangeFactorPosePoint3;
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactor2D;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Point3> RangeFactor3D;
typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Pose2> RangeFactorPose2;
typedef gtsam::RangeFactor<gtsam::Pose3, gtsam::Pose3> RangeFactorPose3;
typedef gtsam::RangeFactor<gtsam::CalibratedCamera, gtsam::Point3> RangeFactorCalibratedCameraPoint;


@ -142,8 +142,8 @@
% RangeFactorCalibratedCameraPoint - class RangeFactorCalibratedCameraPoint, see Doxygen page for details
% RangeFactorPose2 - class RangeFactorPose2, see Doxygen page for details
% RangeFactorPose3 - class RangeFactorPose3, see Doxygen page for details
% RangeFactorPosePoint2 - class RangeFactorPosePoint2, see Doxygen page for details
% RangeFactorPosePoint3 - class RangeFactorPosePoint3, see Doxygen page for details
% RangeFactor2D - class RangeFactor2D, see Doxygen page for details
% RangeFactor3D - class RangeFactor3D, see Doxygen page for details
% RangeFactorSimpleCamera - class RangeFactorSimpleCamera, see Doxygen page for details
% RangeFactorSimpleCameraPoint - class RangeFactorSimpleCameraPoint, see Doxygen page for details
% VisualISAMGenerateData - VisualISAMGenerateData creates data for viusalSLAM::iSAM examples


@ -125,7 +125,7 @@ for i=1:M % M
j = TD(k,3);
range = TD(k,4);
if addRange
factor = RangeFactorPosePoint2(i, symbol('L',j), range, noiseModels.range);
factor = RangeFactor2D(i, symbol('L',j), range, noiseModels.range);
% Throw out obvious outliers based on current landmark estimates
error=factor.unwhitenedError(landmarkEstimates);
if k<=minK || abs(error)<5
@ -146,14 +146,14 @@ for i=1:M % M
end
isam.update(newFactors, initial);
result = isam.calculateEstimate();
lastPose = result.at(i);
lastPose = result.atPose2(i);
% update landmark estimates
if addRange
landmarkEstimates = Values;
for jj=1:size(TL,1)
j=TL(jj,1);
key = symbol('L',j);
landmarkEstimates.insert(key,result.at(key));
landmarkEstimates.insert(key,result.atPoint2(key));
end
end
newFactors = NonlinearFactorGraph;


@ -76,7 +76,7 @@ for i=1:M
while k<=K && t>=TD(k,1)
j = TD(k,3);
range = TD(k,4);
factor = RangeFactorPosePoint2(i, symbol('L',j), range, noiseModels.range);
factor = RangeFactor2D(i, symbol('L',j), range, noiseModels.range);
graph.add(factor);
k=k+1;
end


@ -64,7 +64,7 @@ for i=1:size(truth.cameras,2)
initialEstimate.insert(symbol('c',i), camera_i);
end
for j=1:size(truth.points,2)
point_j = truth.points{j}.retract(0.1*randn(3,1));
point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1));
initialEstimate.insert(symbol('p',j), point_j);
end
initialEstimate.print(sprintf('\nInitial estimate:\n '));


@ -58,7 +58,7 @@ for i=1:size(truth.cameras,2)
initialEstimate.insert(symbol('x',i), pose_i);
end
for j=1:size(truth.points,2)
point_j = truth.points{j}.retract(0.1*randn(3,1));
point_j = Point3(truth.points{j}.vector() + 0.1*randn(3,1));
initialEstimate.insert(symbol('p',j), point_j);
end
initialEstimate.print(sprintf('\nInitial estimate:\n '));


@ -54,7 +54,8 @@ S13 = [
+0.00,-8.94427
];
d=[2.23607;-1.56525];
expectedCG = GaussianConditional(x2,d,R11,l1,S12,x1,S13);
unit2 = noiseModel.Unit.Create(2);
expectedCG = GaussianConditional(x2,d,R11,l1,S12,x1,S13,unit2);
% check if the result matches
CHECK('actualCG.equals(expectedCG,1e-5)',actualCG.equals(expectedCG,1e-4));


@ -52,14 +52,26 @@ priorMean = Pose2(0.0, 0.0, 0.0); % prior at origin
priorNoise = noiseModel.Diagonal.Sigmas([0.3; 0.3; 0.1]);
graph.add(PriorFactorPose2(i1, priorMean, priorNoise)); % add directly to graph
% Between Factors - FAIL: unregistered class
% Between Factors
odometry = Pose2(2.0, 0.0, 0.0);
odometryNoise = noiseModel.Diagonal.Sigmas([0.2; 0.2; 0.1]);
graph.add(BetweenFactorPose2(i1, i2, odometry, odometryNoise));
graph.add(BetweenFactorPose2(i2, i3, odometry, odometryNoise));
% BearingRange Factors - FAIL: unregistered class
% Range Factors
rNoise = noiseModel.Diagonal.Sigmas([0.2]);
graph.add(RangeFactor2D(i1, j1, sqrt(4+4), rNoise));
graph.add(RangeFactor2D(i2, j1, 2, rNoise));
graph.add(RangeFactor2D(i3, j2, 2, rNoise));
% Bearing Factors
degrees = pi/180;
bNoise = noiseModel.Diagonal.Sigmas([0.1]);
graph.add(BearingFactor2D(i1, j1, Rot2(45*degrees), bNoise));
graph.add(BearingFactor2D(i2, j1, Rot2(90*degrees), bNoise));
graph.add(BearingFactor2D(i3, j2, Rot2(90*degrees), bNoise));
% BearingRange Factors
brNoise = noiseModel.Diagonal.Sigmas([0.1; 0.2]);
graph.add(BearingRangeFactor2D(i1, j1, Rot2(45*degrees), sqrt(4+4), brNoise));
graph.add(BearingRangeFactor2D(i2, j1, Rot2(90*degrees), 2, brNoise));


@ -103,9 +103,9 @@ for ind_pose = 2:7
r2 = curr_pose.range(lmk1); % range of lmk1 wrt x2
srf1.addRange(key_curr, r2);
rangef1 = RangeFactorPosePoint2(key_prev, lmkKey(1), r1, noiseRange);
rangef1 = RangeFactor2D(key_prev, lmkKey(1), r1, noiseRange);
fullGraph.add(rangef1);
rangef2 = RangeFactorPosePoint2(key_curr, lmkKey(1), r2, noiseRange);
rangef2 = RangeFactor2D(key_curr, lmkKey(1), r2, noiseRange);
fullGraph.add(rangef2);
if goodInitFlag_lmk1==1
@ -123,9 +123,9 @@ for ind_pose = 2:7
r4 = curr_pose.range(lmk2); % range of lmk2 wrt x3
srf2.addRange(key_curr, r4);
rangef3 = RangeFactorPosePoint2(key_curr, lmkKey(1), r3, noiseRange);
rangef3 = RangeFactor2D(key_curr, lmkKey(1), r3, noiseRange);
fullGraph.add(rangef3);
rangef4 = RangeFactorPosePoint2(key_curr, lmkKey(2), r4, noiseRange);
rangef4 = RangeFactor2D(key_curr, lmkKey(2), r4, noiseRange);
% IF WE ADD FACTOR HERE IT CRASHES: fullGraph.add(rangef4);
%====================================================================
case 4
@ -138,9 +138,9 @@ for ind_pose = 2:7
% DELAYED INITIALIZATION:
fullGraph.add(rangef4);
rangef5 = RangeFactorPosePoint2(key_curr, lmkKey(2), r5, noiseRange);
rangef5 = RangeFactor2D(key_curr, lmkKey(2), r5, noiseRange);
fullGraph.add(rangef5);
rangef6 = RangeFactorPosePoint2(key_curr, lmkKey(3), r6, noiseRange);
rangef6 = RangeFactor2D(key_curr, lmkKey(3), r6, noiseRange);
% IF WE ADD FACTOR HERE IT CRASHES: fullGraph.add(rangef6);
if goodInitFlag_lmk2==1
@ -160,9 +160,9 @@ for ind_pose = 2:7
% DELAYED INITIALIZATION:
fullGraph.add(rangef6);
rangef7 = RangeFactorPosePoint2(key_curr, lmkKey(2), r7, noiseRange);
rangef7 = RangeFactor2D(key_curr, lmkKey(2), r7, noiseRange);
fullGraph.add(rangef7);
rangef8 = RangeFactorPosePoint2(key_curr, lmkKey(3), r8, noiseRange);
rangef8 = RangeFactor2D(key_curr, lmkKey(3), r8, noiseRange);
fullGraph.add(rangef8);
if goodInitFlag_lmk3==1
@ -176,7 +176,7 @@ for ind_pose = 2:7
r9 = curr_pose.range(lmk3); % range of lmk3 wrt x6
srf3.addRange(key_curr, r9);
rangef9 = RangeFactorPosePoint2(key_curr, lmkKey(3), r9, noiseRange);
rangef9 = RangeFactor2D(key_curr, lmkKey(3), r9, noiseRange);
fullGraph.add(rangef9);
case 7
% x6-lmk3
@ -184,7 +184,7 @@ for ind_pose = 2:7
srf3.addRange(key_curr, r10);
smartGraph.add(srf3);
rangef10 = RangeFactorPosePoint2(key_curr, lmkKey(3), r10, noiseRange);
rangef10 = RangeFactor2D(key_curr, lmkKey(3), r10, noiseRange);
fullGraph.add(rangef10);
end


@ -96,8 +96,8 @@ typedef NonlinearEquality<CalibratedCamera> NonlinearEqualityCalibratedCamera;
typedef NonlinearEquality<SimpleCamera> NonlinearEqualitySimpleCamera;
typedef NonlinearEquality<StereoCamera> NonlinearEqualityStereoCamera;
typedef RangeFactor<Pose2, Point2> RangeFactorPosePoint2;
typedef RangeFactor<Pose3, Point3> RangeFactorPosePoint3;
typedef RangeFactor<Pose2, Point2> RangeFactor2D;
typedef RangeFactor<Pose3, Point3> RangeFactor3D;
typedef RangeFactor<Pose2, Pose2> RangeFactorPose2;
typedef RangeFactor<Pose3, Pose3> RangeFactorPose3;
typedef RangeFactor<CalibratedCamera, Point3> RangeFactorCalibratedCameraPoint;
@ -204,8 +204,8 @@ BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqua
BOOST_CLASS_EXPORT_GUID(NonlinearEqualitySimpleCamera, "gtsam::NonlinearEqualitySimpleCamera");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera");
BOOST_CLASS_EXPORT_GUID(RangeFactorPosePoint2, "gtsam::RangeFactorPosePoint2");
BOOST_CLASS_EXPORT_GUID(RangeFactorPosePoint3, "gtsam::RangeFactorPosePoint3");
BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D");
BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3");
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint");
@ -378,8 +378,8 @@ TEST (testSerializationSLAM, factors) {
NonlinearEqualitySimpleCamera nonlinearEqualitySimpleCamera(a13, simpleCamera);
NonlinearEqualityStereoCamera nonlinearEqualityStereoCamera(a14, stereoCamera);
RangeFactorPosePoint2 rangeFactorPosePoint2(a08, a03, 2.0, model1);
RangeFactorPosePoint3 rangeFactorPosePoint3(a09, a05, 2.0, model1);
RangeFactor2D rangeFactor2D(a08, a03, 2.0, model1);
RangeFactor3D rangeFactor3D(a09, a05, 2.0, model1);
RangeFactorPose2 rangeFactorPose2(a08, b08, 2.0, model1);
RangeFactorPose3 rangeFactorPose3(a09, b09, 2.0, model1);
RangeFactorCalibratedCameraPoint rangeFactorCalibratedCameraPoint(a12, a05, 2.0, model1);
@ -439,8 +439,8 @@ TEST (testSerializationSLAM, factors) {
graph += nonlinearEqualitySimpleCamera;
graph += nonlinearEqualityStereoCamera;
graph += rangeFactorPosePoint2;
graph += rangeFactorPosePoint3;
graph += rangeFactor2D;
graph += rangeFactor3D;
graph += rangeFactorPose2;
graph += rangeFactorPose3;
graph += rangeFactorCalibratedCameraPoint;
@ -505,8 +505,8 @@ TEST (testSerializationSLAM, factors) {
EXPECT(equalsObj<NonlinearEqualitySimpleCamera>(nonlinearEqualitySimpleCamera));
EXPECT(equalsObj<NonlinearEqualityStereoCamera>(nonlinearEqualityStereoCamera));
EXPECT(equalsObj<RangeFactorPosePoint2>(rangeFactorPosePoint2));
EXPECT(equalsObj<RangeFactorPosePoint3>(rangeFactorPosePoint3));
EXPECT(equalsObj<RangeFactor2D>(rangeFactor2D));
EXPECT(equalsObj<RangeFactor3D>(rangeFactor3D));
EXPECT(equalsObj<RangeFactorPose2>(rangeFactorPose2));
EXPECT(equalsObj<RangeFactorPose3>(rangeFactorPose3));
EXPECT(equalsObj<RangeFactorCalibratedCameraPoint>(rangeFactorCalibratedCameraPoint));
@ -571,8 +571,8 @@ TEST (testSerializationSLAM, factors) {
EXPECT(equalsXML<NonlinearEqualitySimpleCamera>(nonlinearEqualitySimpleCamera));
EXPECT(equalsXML<NonlinearEqualityStereoCamera>(nonlinearEqualityStereoCamera));
EXPECT(equalsXML<RangeFactorPosePoint2>(rangeFactorPosePoint2));
EXPECT(equalsXML<RangeFactorPosePoint3>(rangeFactorPosePoint3));
EXPECT(equalsXML<RangeFactor2D>(rangeFactor2D));
EXPECT(equalsXML<RangeFactor3D>(rangeFactor3D));
EXPECT(equalsXML<RangeFactorPose2>(rangeFactorPose2));
EXPECT(equalsXML<RangeFactorPose3>(rangeFactorPose3));
EXPECT(equalsXML<RangeFactorCalibratedCameraPoint>(rangeFactorCalibratedCameraPoint));
@ -637,8 +637,8 @@ TEST (testSerializationSLAM, factors) {
EXPECT(equalsBinary<NonlinearEqualitySimpleCamera>(nonlinearEqualitySimpleCamera));
EXPECT(equalsBinary<NonlinearEqualityStereoCamera>(nonlinearEqualityStereoCamera));
EXPECT(equalsBinary<RangeFactorPosePoint2>(rangeFactorPosePoint2));
EXPECT(equalsBinary<RangeFactorPosePoint3>(rangeFactorPosePoint3));
EXPECT(equalsBinary<RangeFactor2D>(rangeFactor2D));
EXPECT(equalsBinary<RangeFactor3D>(rangeFactor3D));
EXPECT(equalsBinary<RangeFactorPose2>(rangeFactorPose2));
EXPECT(equalsBinary<RangeFactorPose3>(rangeFactorPose3));
EXPECT(equalsBinary<RangeFactorCalibratedCameraPoint>(rangeFactorCalibratedCameraPoint));

tests/testVisualISAM2.cpp (new file, 127 lines)

@ -0,0 +1,127 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file testVisualISAM2.cpp
* @brief Test convergence of visualSLAM example.
* @author Duy-Nguyen Ta
* @author Frank Dellaert
*/
#include <CppUnitLite/TestHarness.h>
#include <examples/SFMdata.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/ProjectionFactor.h>
#include <vector>
using namespace std;
using namespace gtsam;
/* ************************************************************************* */
TEST(testVisualISAM2, all)
{
Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
auto measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0);
// Create ground truth data
vector<Point3> points = createPoints();
vector<Pose3> poses = createPoses();
// Set the parameters
ISAM2Params parameters;
parameters.relinearizeThreshold = 0.01;
parameters.relinearizeSkip = 1;
ISAM2 isam(parameters);
// Create a Factor Graph and Values to hold the new data
NonlinearFactorGraph graph;
Values initialEstimate;
// Loop over the poses, adding the observations to iSAM incrementally
for (size_t i = 0; i < poses.size(); ++i)
{
// Add factors for each landmark observation
for (size_t j = 0; j < points.size(); ++j)
{
SimpleCamera camera(poses[i], *K);
Point2 measurement = camera.project(points[j]);
graph.emplace_shared<GenericProjectionFactor<Pose3, Point3, Cal3_S2>>(
measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K);
}
// Add an initial guess for the current pose
// Intentionally initialize the variables off from the ground truth
static Pose3 kDeltaPose(Rot3::Rodrigues(-0.1, 0.2, 0.25),
Point3(0.05, -0.10, 0.20));
initialEstimate.insert(Symbol('x', i), poses[i] * kDeltaPose);
// Treat first iteration as special case
if (i == 0)
{
// Add a prior on pose x0, 30cm std on x,y,z and 0.1 rad on roll,pitch,yaw
static auto kPosePrior = noiseModel::Diagonal::Sigmas(
(Vector(6) << Vector3::Constant(0.3), Vector3::Constant(0.1))
.finished());
graph.emplace_shared<PriorFactor<Pose3>>(Symbol('x', 0), poses[0],
kPosePrior);
// Add a prior on landmark l0
static auto kPointPrior = noiseModel::Isotropic::Sigma(3, 0.1);
graph.emplace_shared<PriorFactor<Point3>>(Symbol('l', 0), points[0],
kPointPrior);
// Add initial guesses to all observed landmarks
// Intentionally initialize the variables off from the ground truth
static Point3 kDeltaPoint(-0.25, 0.20, 0.15);
for (size_t j = 0; j < points.size(); ++j)
initialEstimate.insert<Point3>(Symbol('l', j), points[j] + kDeltaPoint);
}
else
{
// Update iSAM with the new factors
isam.update(graph, initialEstimate);
// Do an extra update to converge within these 8 iterations
isam.update();
// Optimize
Values currentEstimate = isam.calculateEstimate();
// reset for next iteration
graph.resize(0);
initialEstimate.clear();
}
} // for loop
auto result = isam.calculateEstimate();
EXPECT_LONGS_EQUAL(16, result.size());
for (size_t j = 0; j < points.size(); ++j)
{
Symbol key('l', j);
EXPECT(assert_equal(points[j], result.at<Point3>(key), 0.01));
}
}
/* ************************************************************************* */
int main()
{
TestResult tr;
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */


@ -127,7 +127,7 @@ void Module::parseMarkup(const std::string& data) {
TemplateInstantiationTypedef singleInstantiation, singleInstantiation0;
TypeListGrammar<'<','>'> typelist_g(singleInstantiation.typeList);
// typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactorPosePoint2;
// typedef gtsam::RangeFactor<gtsam::Pose2, gtsam::Point2> RangeFactor2D;
TypeGrammar instantiationClass_g(singleInstantiation.class_);
Rule templateSingleInstantiation_p =
(str_p("typedef") >> instantiationClass_g >>
@ -273,9 +273,9 @@ void Module::generate_matlab_wrapper(const string& toolboxPath) const {
// Include boost.serialization archive headers before other class headers
if (hasSerialiable) {
wrapperFile.oss << "#include <boost/serialization/export.hpp>\n";
wrapperFile.oss << "#include <boost/archive/text_iarchive.hpp>\n";
wrapperFile.oss << "#include <boost/archive/text_oarchive.hpp>\n\n";
wrapperFile.oss << "#include <boost/archive/text_oarchive.hpp>\n";
wrapperFile.oss << "#include <boost/serialization/export.hpp>\n\n";
}
// Generate includes while avoiding redundant includes


@ -1,9 +1,9 @@
#include <wrap/matlab.h>
#include <map>
#include <boost/serialization/export.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <boost/serialization/export.hpp>
#include <folder/path/to/Test.h>
#include <gtsam/geometry/Point2.h>