Updated the VisualSLAM examples, removing the SLAM namespaces
parent 5da5adb2f1
commit 67e2d832fe
@@ -11,99 +11,139 @@
 
 /**
  * @file VisualISAMExample.cpp
- * @brief An ISAM example for synthesis sequence, single camera
+ * @brief A visualSLAM example for the structure-from-motion problem on a simulated dataset
+ * This version uses iSAM to solve the problem incrementally
  * @author Duy-Nguyen Ta
  */
 
+/**
+ * A structure-from-motion example with landmarks
+ * - The landmarks form a 10 meter cube
+ * - The robot rotates around the landmarks, always facing towards the cube
+ */
+
+// As this is a full 3D problem, we will use Pose3 variables to represent the camera
+// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
+// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
+// We will also need a camera object to hold calibration information and perform projections.
+#include <gtsam/geometry/Pose3.h>
+#include <gtsam/geometry/Point3.h>
+#include <gtsam/geometry/Point2.h>
+#include <gtsam/geometry/SimpleCamera.h>
+
+// Each variable in the system (poses and landmarks) must be identified with a unique key.
+// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
+// Here we will use Symbols
 #include <gtsam/nonlinear/Symbol.h>
 
+// In GTSAM, measurement functions are represented as 'factors'. Several common factors
+// have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
+// Here we will use Projection factors to model the camera's landmark observations.
+// Also, we will initialize the robot at some location using a Prior factor.
+#include <gtsam/slam/PriorFactor.h>
+#include <gtsam/slam/ProjectionFactor.h>
+
+// We want to use iSAM to solve the structure-from-motion problem incrementally, so
+// include iSAM here
 #include <gtsam/nonlinear/NonlinearISAM.h>
-#include <gtsam/slam/visualSLAM.h>
-#include <gtsam/slam/BetweenFactor.h>
-#include "VisualSLAMData.h"
+
+// iSAM requires as input a set of new factors to be added, stored in a factor graph,
+// and initial guesses for any new variables used in the added factors
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/nonlinear/Values.h>
+
+#include <vector>
 
 using namespace std;
 using namespace gtsam;
 
-// Convenience for named keys
-using symbol_shorthand::X;
-using symbol_shorthand::L;
-
 /* ************************************************************************* */
 int main(int argc, char* argv[]) {
 
-  VisualSLAMExampleData data = VisualSLAMExampleData::generate();
-
-  /* 1. Create a NonlinearISAM which will be relinearized and reordered after every "relinearizeInterval" updates */
-  int relinearizeInterval = 3;
-  NonlinearISAM isam(relinearizeInterval);
-
-  /* 2. At each frame (poseId) with new camera pose and set of associated measurements,
-   * create a graph of new factors and update ISAM */
-
-  // Store the current best estimate from ISAM
-  Values currentEstimate;
-
-  // First two frames:
-  // Add factors and initial values for the first two poses and landmarks then update ISAM.
-  // Note: measurements from the first pose only are not enough to update ISAM:
-  // the system is underconstrained.
-  {
-    visualSLAM::Graph newFactors;
-
-    // First pose with prior factor
-    newFactors.addPosePrior(X(0), data.poses[0], data.noiseX);
-
-    // Second pose with odometry measurement
-    newFactors.addRelativePose(X(0), X(1), data.odometry, data.noiseX);
-
-    // Visual measurements at both poses
-    for (size_t i=0; i<2; ++i) {
-      for (size_t j=0; j<data.z[i].size(); ++j) {
-        newFactors.addMeasurement(data.z[i][j], data.noiseZ, X(i), L(j), data.sK);
-      }
-    }
-
-    // Initial values for the first two poses, simulated with Gaussian noise
-    Values initials;
-    initials.insert(X(0), data.poses[0]);
-    initials.insert(X(1), data.poses[0]*data.odometry);
-
-    // Initial values for the landmarks
-    for (size_t j=0; j<data.points.size(); ++j)
-      initials.insert(L(j), data.points[j]);
-
-    // Update ISAM the first time and obtain the current estimate
-    isam.update(newFactors, initials);
-    currentEstimate = isam.estimate();
-    cout << "Frame 0 and 1: " << endl;
-    currentEstimate.print("Current estimate: ");
+  // Define the camera calibration parameters
+  Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
+
+  // Define the camera observation noise model
+  noiseModel::Isotropic::shared_ptr measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
+
+  // Create the set of ground-truth landmarks
+  std::vector<gtsam::Point3> points;
+  points.push_back(gtsam::Point3(10.0,10.0,10.0));
+  points.push_back(gtsam::Point3(-10.0,10.0,10.0));
+  points.push_back(gtsam::Point3(-10.0,-10.0,10.0));
+  points.push_back(gtsam::Point3(10.0,-10.0,10.0));
+  points.push_back(gtsam::Point3(10.0,10.0,-10.0));
+  points.push_back(gtsam::Point3(-10.0,10.0,-10.0));
+  points.push_back(gtsam::Point3(-10.0,-10.0,-10.0));
+  points.push_back(gtsam::Point3(10.0,-10.0,-10.0));
+
+  // Create the set of ground-truth poses
+  std::vector<gtsam::Pose3> poses;
+  double radius = 30.0;
+  int i = 0;
+  double theta = 0.0;
+  gtsam::Point3 up(0,0,1);
+  gtsam::Point3 target(0,0,0);
+  for(; i < 8; ++i, theta += 2*M_PI/8) {
+    gtsam::Point3 position = Point3(radius*cos(theta), radius*sin(theta), 0.0);
+    gtsam::SimpleCamera camera = SimpleCamera::Lookat(position, target, up);
+    poses.push_back(camera.pose());
   }
 
-  // Subsequent frames: Add new odometry and measurement factors and initial values,
-  // then update ISAM at each frame
-  for (size_t i=2; i<data.poses.size(); ++i) {
-    visualSLAM::Graph newFactors;
-
-    // Factor for odometry measurements, simulated by adding Gaussian noise to the ground-truth.
-    Pose3 odoMeasurement = data.odometry;
-    newFactors.addRelativePose(X(i-1), X(i), data.odometry, data.noiseX);
-
-    // Factors for visual measurements
-    for (size_t j=0; j<data.z[i].size(); ++j) {
-      newFactors.addMeasurement(data.z[i][j], data.noiseZ, X(i), L(j), data.sK);
-    }
-
-    // Initial estimates for the new node Xi, simulated by Gaussian noises
-    Values initials;
-    initials.insert(X(i), currentEstimate.at<Pose3>(X(i-1))*data.odometry);
-
-    // update ISAM
-    isam.update(newFactors, initials);
-    currentEstimate = isam.estimate();
-    cout << "****************************************************" << endl;
-    cout << "Frame " << i << ": " << endl;
-    currentEstimate.print("Current estimate: ");
+  // Create a NonlinearISAM object which will relinearize and reorder the variables every "relinearizeInterval" updates
+  int relinearizeInterval = 1;
+  NonlinearISAM isam(relinearizeInterval);
+
+  // Create a Factor Graph and Values to hold the new data
+  NonlinearFactorGraph graph;
+  Values initialEstimate;
+
+  // Loop over the different poses, adding the observations to iSAM incrementally
+  for (size_t i = 0; i < poses.size(); ++i) {
+
+    // Add factors for each landmark observation
+    for (size_t j = 0; j < points.size(); ++j) {
+      SimpleCamera camera(poses[i], *K);
+      Point2 measurement = camera.project(points[j]);
+      graph.add(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K));
+    }
+
+    // Add an initial guess for the current pose
+    // Intentionally initialize the variables off from the ground truth
+    initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
+
+    // If this is the first iteration, add a prior on the first pose to set the coordinate frame
+    // and a prior on the first landmark to set the scale
+    // Also, as iSAM solves incrementally, we must wait until each is observed at least twice before
+    // adding it to iSAM.
+    if( i == 0) {
+      // Add a prior on pose x0
+      noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas(Vector_(6, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1)); // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
+      graph.add(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise));
+
+      // Add a prior on landmark l0
+      noiseModel::Isotropic::shared_ptr pointNoise = noiseModel::Isotropic::Sigma(3, 0.1);
+      graph.add(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise)); // add directly to graph
+
+      // Add initial guesses to all observed landmarks
+      // Intentionally initialize the variables off from the ground truth
+      for (size_t j = 0; j < points.size(); ++j)
+        initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15)));
+
+    } else {
+      // Update iSAM with the new factors
+      isam.update(graph, initialEstimate);
+      Values currentEstimate = isam.estimate();
+      cout << "****************************************************" << endl;
+      cout << "Frame " << i << ": " << endl;
+      currentEstimate.print("Current estimate: ");
+
+      // Clear the factor graph and values for the next iteration
+      graph.resize(0);
+      initialEstimate.clear();
+    }
   }
 
   return 0;
 }
 /* ************************************************************************* */
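The rewritten VisualISAMExample above reduces to one incremental pattern: collect the new factors and initial guesses for a frame in a NonlinearFactorGraph and a Values container, hand both to NonlinearISAM, read back the current estimate, and clear the containers before the next frame. A minimal sketch of that step, using only calls that appear in the diff (the helper name stepISAM is illustrative, not part of the example):

#include <gtsam/nonlinear/NonlinearISAM.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>

using namespace gtsam;

// Feed one frame's new factors and initial guesses to iSAM, then return the updated estimate.
// The graph and values are cleared afterwards so that the next frame only contributes new
// information, mirroring the graph.resize(0) / initialEstimate.clear() calls in the example.
Values stepISAM(NonlinearISAM& isam, NonlinearFactorGraph& graph, Values& initial) {
  isam.update(graph, initial);        // incremental update with only the new factors and values
  Values estimate = isam.estimate();  // current solution over all variables seen so far
  graph.resize(0);                    // start the next frame with an empty factor graph
  initial.clear();
  return estimate;
}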
@@ -1,89 +0,0 @@
-/* ----------------------------------------------------------------------------
-
- * GTSAM Copyright 2010, Georgia Tech Research Corporation,
- * Atlanta, Georgia 30332-0415
- * All Rights Reserved
- * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
-
- * See LICENSE for the license information
-
- * -------------------------------------------------------------------------- */
-
-/**
- * @file VisualSLAMData.cpp
- * @brief Generate ground-truth simulated data for VisualSLAM examples
- * @author Duy-Nguyen Ta
- */
-
-#pragma once
-
-#include <vector>
-#include <map>
-#include <gtsam/geometry/SimpleCamera.h>
-
-/* ************************************************************************* */
-/**
- * Simulated data for the visual SLAM examples:
- * - 8 Landmarks: (10,10,10) (-10,10,10) (-10,-10,10) (10,-10,10)
- *                (10,10,-10) (-10,10,-10) (-10,-10,-10) (10,-10,-10)
- * - n 90-deg-FoV cameras with the same calibration parameters:
- *     f = 50.0, Image: 100x100, center: 50.0, 50.0
- *   and ground-truth poses on a circle around the landmarks looking at the world's origin:
- *     Rot3(-sin(theta), 0, -cos(theta),
- *           cos(theta), 0, -sin(theta),
- *           0,         -1,  0),
- *     Point3(r*cos(theta), r*sin(theta), 0.0)
- *     (theta += 2*pi/N)
- * - Measurement noise: 1 pix sigma
- */
-struct VisualSLAMExampleData {
-  gtsam::shared_ptrK sK;                         // camera calibration parameters
-  std::vector<gtsam::Pose3> poses;               // ground-truth camera poses
-  gtsam::Pose3 odometry;                         // ground-truth odometry between 2 consecutive poses (simulated data for iSAM)
-  std::vector<gtsam::Point3> points;             // ground-truth landmarks
-  std::map<int, std::vector<gtsam::Point2> > z;  // 2D measurements of landmarks in each camera frame
-  gtsam::SharedDiagonal noiseZ;                  // measurement noise (noiseModel::Isotropic::Sigma(2, 5.0f));
-  gtsam::SharedDiagonal noiseX;                  // noise for camera poses
-  gtsam::SharedDiagonal noiseL;                  // noise for landmarks
-
-  static const VisualSLAMExampleData generate() {
-    VisualSLAMExampleData data;
-    // Landmarks (ground truth)
-    data.points.push_back(gtsam::Point3(10.0,10.0,10.0));
-    data.points.push_back(gtsam::Point3(-10.0,10.0,10.0));
-    data.points.push_back(gtsam::Point3(-10.0,-10.0,10.0));
-    data.points.push_back(gtsam::Point3(10.0,-10.0,10.0));
-    data.points.push_back(gtsam::Point3(10.0,10.0,-10.0));
-    data.points.push_back(gtsam::Point3(-10.0,10.0,-10.0));
-    data.points.push_back(gtsam::Point3(-10.0,-10.0,-10.0));
-    data.points.push_back(gtsam::Point3(10.0,-10.0,-10.0));
-
-    // Camera calibration parameters
-    data.sK = gtsam::shared_ptrK(new gtsam::Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
-
-    // n camera poses
-    int n = 8;
-    double theta = 0.0;
-    double r = 30.0;
-    for (int i=0; i<n; ++i, theta += 2*M_PI/n) {
-      gtsam::Point3 C = gtsam::Point3(r*cos(theta), r*sin(theta), 0.0);
-      gtsam::SimpleCamera camera = gtsam::SimpleCamera::Lookat(C, gtsam::Point3(), gtsam::Point3(0,0,1));
-      data.poses.push_back(camera.pose());
-    }
-    data.odometry = data.poses[0].between(data.poses[1]);
-
-    // Simulated measurements, possibly with Gaussian noise
-    data.noiseZ = gtsam::noiseModel::Isotropic::Sigma(2, 1.0);
-    for (size_t i=0; i<data.poses.size(); ++i) {
-      for (size_t j=0; j<data.points.size(); ++j) {
-        gtsam::SimpleCamera camera(data.poses[i], *data.sK);
-        data.z[i].push_back(camera.project(data.points[j])
-            /*+ gtsam::Point2(data.noiseZ->sample())*/); // you can add noise as desired
-      }
-    }
-    data.noiseX = gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector_(6, 0.001, 0.001, 0.001, 0.1, 0.1, 0.1));
-    data.noiseL = gtsam::noiseModel::Isotropic::Sigma(3, 0.1);
-
-    return data;
-  }
-};
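The deleted VisualSLAMData.h above generated its ground-truth camera poses on a circle of radius 30, each camera looking at the world origin with +Z up, via SimpleCamera::Lookat; both rewritten examples now build the same trajectory inline. A self-contained sketch of that construction under the same assumptions (eight cameras, radius 30; createPoses is an illustrative helper name):

#include <cmath>
#include <vector>
#include <gtsam/geometry/SimpleCamera.h>

using namespace gtsam;

// Ground-truth camera poses on a circle, each looking at the origin with +Z up,
// as in the deleted VisualSLAMData.h and the inlined loops of the rewritten examples.
std::vector<Pose3> createPoses(int n = 8, double radius = 30.0) {
  std::vector<Pose3> poses;
  Point3 target(0.0, 0.0, 0.0), up(0.0, 0.0, 1.0);
  double theta = 0.0;
  for (int i = 0; i < n; ++i, theta += 2.0 * M_PI / n) {
    Point3 position(radius * cos(theta), radius * sin(theta), 0.0);
    SimpleCamera camera = SimpleCamera::Lookat(position, target, up);
    poses.push_back(camera.pose());
  }
  return poses;
}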
@@ -15,49 +15,123 @@
  * @author Duy-Nguyen Ta
  */
 
-#include <gtsam/slam/visualSLAM.h>
+/**
+ * A structure-from-motion example with landmarks
+ * - The landmarks form a 10 meter cube
+ * - The robot rotates around the landmarks, always facing towards the cube
+ */
+
+// As this is a full 3D problem, we will use Pose3 variables to represent the camera
+// positions and Point3 variables (x, y, z) to represent the landmark coordinates.
+// Camera observations of landmarks (i.e. pixel coordinates) will be stored as Point2 (x, y).
+// We will also need a camera object to hold calibration information and perform projections.
+#include <gtsam/geometry/Pose3.h>
+#include <gtsam/geometry/Point3.h>
+#include <gtsam/geometry/Point2.h>
+#include <gtsam/geometry/SimpleCamera.h>
+
+// Each variable in the system (poses and landmarks) must be identified with a unique key.
+// We can either use simple integer keys (1, 2, 3, ...) or symbols (X1, X2, L1).
+// Here we will use Symbols
 #include <gtsam/nonlinear/Symbol.h>
-#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
-#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
-#include "VisualSLAMData.h"
+
+// In GTSAM, measurement functions are represented as 'factors'. Several common factors
+// have been provided with the library for solving robotics/SLAM/Bundle Adjustment problems.
+// Here we will use Projection factors to model the camera's landmark observations.
+// Also, we will initialize the robot at some location using a Prior factor.
+#include <gtsam/slam/PriorFactor.h>
+#include <gtsam/slam/ProjectionFactor.h>
+
+// When the factors are created, we will add them to a Factor Graph. As the factors we are using
+// are nonlinear factors, we will need a Nonlinear Factor Graph.
+#include <gtsam/nonlinear/NonlinearFactorGraph.h>
+
+// Finally, once all of the factors have been added to our factor graph, we will want to
+// solve/optimize the graph to find the best (Maximum A Posteriori) set of variable values.
+// GTSAM includes several nonlinear optimizers to perform this step. Here we will use a
+// trust-region method known as Powell's Dogleg
+#include <gtsam/nonlinear/DoglegOptimizer.h>
+
+// The nonlinear solvers within GTSAM are iterative solvers, meaning they linearize the
+// nonlinear functions around an initial linearization point, then solve the linear system
+// to update the linearization point. This happens repeatedly until the solver converges
+// to a consistent set of variable values. This requires us to specify an initial guess
+// for each variable, held in a Values container.
+#include <gtsam/nonlinear/Values.h>
+
+#include <vector>
 
 using namespace std;
 using namespace gtsam;
 
-// Convenience for named keys
-using symbol_shorthand::X;
-using symbol_shorthand::L;
-
 /* ************************************************************************* */
 int main(int argc, char* argv[]) {
 
-  VisualSLAMExampleData data = VisualSLAMExampleData::generate();
-
-  /* 1. Create graph *///using the 2D measurements (features) and the calibration data
-  visualSLAM::Graph graph;
-
-  /* 2. Add factors to the graph */
-  // 2a. Measurement factors
-  for (size_t i=0; i<data.poses.size(); ++i) {
-    for (size_t j=0; j<data.points.size(); ++j)
-      graph.addMeasurement(data.z[i][j], data.noiseZ, X(i), L(j), data.sK);
+  // Define the camera calibration parameters
+  Cal3_S2::shared_ptr K(new Cal3_S2(50.0, 50.0, 0.0, 50.0, 50.0));
+
+  // Define the camera observation noise model
+  noiseModel::Isotropic::shared_ptr measurementNoise = noiseModel::Isotropic::Sigma(2, 1.0); // one pixel in u and v
+
+  // Create the set of ground-truth landmarks
+  std::vector<gtsam::Point3> points;
+  points.push_back(gtsam::Point3(10.0,10.0,10.0));
+  points.push_back(gtsam::Point3(-10.0,10.0,10.0));
+  points.push_back(gtsam::Point3(-10.0,-10.0,10.0));
+  points.push_back(gtsam::Point3(10.0,-10.0,10.0));
+  points.push_back(gtsam::Point3(10.0,10.0,-10.0));
+  points.push_back(gtsam::Point3(-10.0,10.0,-10.0));
+  points.push_back(gtsam::Point3(-10.0,-10.0,-10.0));
+  points.push_back(gtsam::Point3(10.0,-10.0,-10.0));
+
+  // Create the set of ground-truth poses
+  std::vector<gtsam::Pose3> poses;
+  double radius = 30.0;
+  int i = 0;
+  double theta = 0.0;
+  gtsam::Point3 up(0,0,1);
+  gtsam::Point3 target(0,0,0);
+  for(; i < 8; ++i, theta += 2*M_PI/8) {
+    gtsam::Point3 position = Point3(radius*cos(theta), radius*sin(theta), 0.0);
+    gtsam::SimpleCamera camera = SimpleCamera::Lookat(position, target, up);
+    poses.push_back(camera.pose());
   }
 
-  // 2b. Prior factor for the first pose and point to constraint the system
-  graph.addPosePrior(X(0), data.poses[0], data.noiseX);
-  graph.addPointPrior(L(0), data.points[0], data.noiseL);
-
-  /* 3. Initial estimates for variable nodes, simulated by Gaussian noises */
-  Values initial;
-  for (size_t i=0; i<data.poses.size(); ++i)
-    initial.insert(X(i), data.poses[i]/* *Pose3::Expmap(data.noiseX->sample())*/); // you can add noise if you want
-  for (size_t j=0; j<data.points.size(); ++j)
-    initial.insert(L(j), data.points[j] /*+ Point3(data.noiseL->sample())*/); // you can add noise if you want
-  initial.print("Intial Estimates: ");
-
-  /* 4. Optimize the graph and print results */
-  visualSLAM::Values result = GaussNewtonOptimizer(graph, initial).optimize();
-  // visualSLAM::Values result = LevenbergMarquardtOptimizer(graph, initial).optimize();
-  result.print("Final results: ");
+  // Create a factor graph
+  NonlinearFactorGraph graph;
+
+  // Add a prior on pose x1. This indirectly specifies where the origin is.
+  noiseModel::Diagonal::shared_ptr poseNoise = noiseModel::Diagonal::Sigmas(Vector_(6, 0.3, 0.3, 0.3, 0.1, 0.1, 0.1)); // 30cm std on x,y,z 0.1 rad on roll,pitch,yaw
+  graph.add(PriorFactor<Pose3>(Symbol('x', 0), poses[0], poseNoise)); // add directly to graph
+
+  // Simulated measurements from each camera pose, adding them to the factor graph
+  for (size_t i = 0; i < poses.size(); ++i) {
+    for (size_t j = 0; j < points.size(); ++j) {
+      SimpleCamera camera(poses[i], *K);
+      Point2 measurement = camera.project(points[j]);
+      graph.add(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(measurement, measurementNoise, Symbol('x', i), Symbol('l', j), K));
+    }
+  }
+
+  // Because the structure-from-motion problem has a scale ambiguity, the problem is still under-constrained
+  // Here we add a prior on the position of the first landmark. This fixes the scale by indicating the distance
+  // between the first camera and the first landmark. All other landmark positions are interpreted using this scale.
+  noiseModel::Isotropic::shared_ptr pointNoise = noiseModel::Isotropic::Sigma(3, 0.1);
+  graph.add(PriorFactor<Point3>(Symbol('l', 0), points[0], pointNoise)); // add directly to graph
+  graph.print("Factor Graph:\n");
+
+  // Create the data structure to hold the initial estimate to the solution
+  // Intentionally initialize the variables off from the ground truth
+  Values initialEstimate;
+  for (size_t i = 0; i < poses.size(); ++i)
+    initialEstimate.insert(Symbol('x', i), poses[i].compose(Pose3(Rot3::rodriguez(-0.1, 0.2, 0.25), Point3(0.05, -0.10, 0.20))));
+  for (size_t j = 0; j < points.size(); ++j)
+    initialEstimate.insert(Symbol('l', j), points[j].compose(Point3(-0.25, 0.20, 0.15)));
+  initialEstimate.print("Initial Estimates:\n");
+
+  /* Optimize the graph and print results */
+  Values result = DoglegOptimizer(graph, initialEstimate).optimize();
+  result.print("Final results:\n");
 
   return 0;
 }
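Taken together, the two example rewrites replace the removed visualSLAM::Graph convenience methods with ordinary factors added to a NonlinearFactorGraph. A rough mapping, restricted to the factor types that appear in the diffs above (the helper addFactors, its parameters, and the fixed key indices are placeholders for illustration):

#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Symbol.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/ProjectionFactor.h>

using namespace gtsam;

// Sketch: how the removed visualSLAM::Graph helpers map onto plain GTSAM factors.
void addFactors(NonlinearFactorGraph& graph, const Pose3& pose0, const Point3& point0,
                const Point2& z, const Cal3_S2::shared_ptr& K,
                const SharedNoiseModel& poseNoise, const SharedNoiseModel& pointNoise,
                const SharedNoiseModel& measNoise) {
  // graph.addPosePrior(X(0), pose0, poseNoise) becomes:
  graph.add(PriorFactor<Pose3>(Symbol('x', 0), pose0, poseNoise));

  // graph.addPointPrior(L(0), point0, pointNoise) becomes:
  graph.add(PriorFactor<Point3>(Symbol('l', 0), point0, pointNoise));

  // graph.addMeasurement(z, measNoise, X(i), L(j), K) becomes:
  graph.add(GenericProjectionFactor<Pose3, Point3, Cal3_S2>(
      z, measNoise, Symbol('x', 0), Symbol('l', 0), K));
}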