Merge pull request #1241 from borglab/lost-triangulate

Implementation of the LOST triangulation algorithm
release/4.3a0
Akshay Krishnan 2022-07-13 07:04:48 -07:00 committed by GitHub
commit b1f441dea9
6 changed files with 576 additions and 197 deletions
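As a quick reference, the new useLOST flag on triangulatePoint3 can be exercised as below. This is only a minimal sketch adapted from TriangulationLOSTExample.cpp in this PR; the two-camera setup, landmark, and sigma are illustrative, while the includes, types, and call signature are taken from the diff.

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>
#include <iostream>

using namespace gtsam;

int main() {
  // Two unit-calibration cameras one meter apart, both looking along +z.
  Cal3_S2 K;
  CameraSet<PinholeCamera<Cal3_S2>> cameras;
  cameras.emplace_back(Pose3(), K);
  cameras.emplace_back(Pose3(Rot3(), Point3(1, 0, 0)), K);

  // Noise-free pixel measurements of a landmark at (0.5, 0, 5).
  const Point3 landmark(0.5, 0, 5);
  Point2Vector measurements = {cameras[0].project(landmark),
                               cameras[1].project(landmark)};

  // New in this PR: pass useLOST=true to triangulate with LOST instead of DLT.
  SharedNoiseModel noise = noiseModel::Isotropic::Sigma(2, 1e-3);
  Point3 estimate = triangulatePoint3<Cal3_S2>(cameras, measurements,
                                               /*rank_tol=*/1e-9,
                                               /*optimize=*/false, noise,
                                               /*useLOST=*/true);
  std::cout << "LOST estimate: " << estimate.transpose() << std::endl;
  return 0;
}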

View File

@ -0,0 +1,159 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file TriangulationLOSTExample.cpp
* @author Akshay Krishnan
* @brief This example runs triangulation several times using 3 different
* approaches: LOST, DLT, and DLT with optimization. It reports the covariance
* and the runtime for each approach.
*
* @date 2022-07-10
*/
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/triangulation.h>
#include <chrono>
#include <iostream>
#include <random>
using namespace std;
using namespace gtsam;
static std::mt19937 rng(42);
void PrintCovarianceStats(const Matrix& mat, const std::string& method) {
Matrix centered = mat.rowwise() - mat.colwise().mean();
Matrix cov = (centered.adjoint() * centered) / double(mat.rows() - 1);
std::cout << method << " covariance: " << std::endl;
std::cout << cov << std::endl;
std::cout << "Trace sqrt: " << sqrt(cov.trace()) << std::endl << std::endl;
}
void PrintDuration(const std::chrono::nanoseconds dur, double num_samples,
const std::string& method) {
double microseconds = dur.count() * 1e-3 / num_samples;
std::cout << "Time taken by " << method << ": " << microseconds
<< " microseconds" << std::endl;
}
void GetLargeCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
std::vector<Pose3>* poses, Point3* point,
Point2Vector* measurements) {
const double minXY = -10, maxXY = 10;
const double minZ = -20, maxZ = 0;
const int nrCameras = 500;
cameras->reserve(nrCameras);
poses->reserve(nrCameras);
measurements->reserve(nrCameras);
*point = Point3(0.0, 0.0, 10.0);
std::uniform_real_distribution<double> rand_xy(minXY, maxXY);
std::uniform_real_distribution<double> rand_z(minZ, maxZ);
Cal3_S2 identityK;
for (int i = 0; i < nrCameras; ++i) {
Point3 wti(rand_xy(rng), rand_xy(rng), rand_z(rng));
Pose3 wTi(Rot3(), wti);
poses->push_back(wTi);
cameras->emplace_back(wTi, identityK);
measurements->push_back(cameras->back().project(*point));
}
}
void GetSmallCamerasDataset(CameraSet<PinholeCamera<Cal3_S2>>* cameras,
std::vector<Pose3>* poses, Point3* point,
Point2Vector* measurements) {
Pose3 pose1;
Pose3 pose2(Rot3(), Point3(5., 0., -5.));
Cal3_S2 identityK;
PinholeCamera<Cal3_S2> camera1(pose1, identityK);
PinholeCamera<Cal3_S2> camera2(pose2, identityK);
*point = Point3(0, 0, 1);
cameras->push_back(camera1);
cameras->push_back(camera2);
*poses = {pose1, pose2};
*measurements = {camera1.project(*point), camera2.project(*point)};
}
Point2Vector AddNoiseToMeasurements(const Point2Vector& measurements,
const double measurementSigma) {
std::normal_distribution<double> normal(0.0, measurementSigma);
Point2Vector noisyMeasurements;
noisyMeasurements.reserve(measurements.size());
for (const auto& p : measurements) {
noisyMeasurements.emplace_back(p.x() + normal(rng), p.y() + normal(rng));
}
return noisyMeasurements;
}
/* ************************************************************************* */
int main(int argc, char* argv[]) {
CameraSet<PinholeCamera<Cal3_S2>> cameras;
std::vector<Pose3> poses;
Point3 landmark;
Point2Vector measurements;
GetLargeCamerasDataset(&cameras, &poses, &landmark, &measurements);
// GetSmallCamerasDataset(&cameras, &poses, &landmark, &measurements);
const double measurementSigma = 1e-2;
SharedNoiseModel measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);
const long int nrTrials = 1000;
Matrix errorsDLT = Matrix::Zero(nrTrials, 3);
Matrix errorsLOST = Matrix::Zero(nrTrials, 3);
Matrix errorsDLTOpt = Matrix::Zero(nrTrials, 3);
double rank_tol = 1e-9;
boost::shared_ptr<Cal3_S2> calib = boost::make_shared<Cal3_S2>();
std::chrono::nanoseconds durationDLT{0};
std::chrono::nanoseconds durationDLTOpt{0};
std::chrono::nanoseconds durationLOST{0};
for (int i = 0; i < nrTrials; i++) {
Point2Vector noisyMeasurements =
AddNoiseToMeasurements(measurements, measurementSigma);
auto lostStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateLOST = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, false, measurementNoise, true);
durationLOST += std::chrono::high_resolution_clock::now() - lostStart;
auto dltStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateDLT = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, false, measurementNoise, false);
durationDLT += std::chrono::high_resolution_clock::now() - dltStart;
auto dltOptStart = std::chrono::high_resolution_clock::now();
boost::optional<Point3> estimateDLTOpt = triangulatePoint3<Cal3_S2>(
cameras, noisyMeasurements, rank_tol, true, measurementNoise, false);
durationDLTOpt += std::chrono::high_resolution_clock::now() - dltOptStart;
errorsLOST.row(i) = *estimateLOST - landmark;
errorsDLT.row(i) = *estimateDLT - landmark;
errorsDLTOpt.row(i) = *estimateDLTOpt - landmark;
}
PrintCovarianceStats(errorsLOST, "LOST");
PrintCovarianceStats(errorsDLT, "DLT");
PrintCovarianceStats(errorsDLTOpt, "DLT_OPT");
PrintDuration(durationLOST, nrTrials, "LOST");
PrintDuration(durationDLT, nrTrials, "DLT");
PrintDuration(durationDLTOpt, nrTrials, "DLT_OPT");
}

View File

@ -34,6 +34,7 @@ namespace gtsam {
/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point3 to Vector3
typedef Vector3 Point3;
typedef std::vector<Point3, Eigen::aligned_allocator<Point3> > Point3Vector;
// Convenience typedef
using Point3Pair = std::pair<Point3, Point3>;

View File

@ -1162,7 +1162,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
const gtsam::SharedNoiseModel& model = nullptr,
const bool useLOST = false);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3_S2* sharedCal,
const gtsam::Point2Vector& measurements,
@ -1184,7 +1185,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3DS2& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
const gtsam::SharedNoiseModel& model = nullptr,
const bool useLOST = false);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3DS2* sharedCal,
const gtsam::Point2Vector& measurements,
@ -1206,7 +1208,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
const gtsam::SharedNoiseModel& model = nullptr,
const bool useLOST = false);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3Bundler* sharedCal,
const gtsam::Point2Vector& measurements,
@ -1228,7 +1231,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
const gtsam::SharedNoiseModel& model = nullptr,
const bool useLOST = false);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3Fisheye* sharedCal,
const gtsam::Point2Vector& measurements,
@ -1250,7 +1254,8 @@ gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
const gtsam::SharedNoiseModel& model = nullptr,
const bool useLOST = false);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3Unified* sharedCal,
const gtsam::Point2Vector& measurements,

View File

@ -15,6 +15,7 @@
* @date July 30th, 2013
* @author Chris Beall (cbeall3)
* @author Luca Carlone
* @author Akshay Krishnan
*/
#include <CppUnitLite/TestHarness.h>
@ -38,24 +39,24 @@ using namespace boost::assign;
// Some common constants
static const boost::shared_ptr<Cal3_S2> sharedCal = //
static const boost::shared_ptr<Cal3_S2> kSharedCal = //
boost::make_shared<Cal3_S2>(1500, 1200, 0.1, 640, 480);
// Looking along X-axis, 1 meter above ground plane (x-y)
static const Rot3 upright = Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2);
static const Pose3 pose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
static const Pose3 kPose1 = Pose3(upright, gtsam::Point3(0, 0, 1));
static const PinholeCamera<Cal3_S2> kCamera1(kPose1, *kSharedCal);
// create second camera 1 meter to the right of first camera
static const Pose3 pose2 = pose1 * Pose3(Rot3(), Point3(1, 0, 0));
PinholeCamera<Cal3_S2> camera2(pose2, *sharedCal);
static const Pose3 kPose2 = kPose1 * Pose3(Rot3(), Point3(1, 0, 0));
static const PinholeCamera<Cal3_S2> kCamera2(kPose2, *kSharedCal);
// landmark ~5 meters in front of camera
static const Point3 landmark(5, 0.5, 1.2);
static const Point3 kLandmark(5, 0.5, 1.2);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
static const Point2 kZ1 = kCamera1.project(kLandmark);
static const Point2 kZ2 = kCamera2.project(kLandmark);
//******************************************************************************
// Simple test with a well-behaved two camera situation
@ -63,22 +64,22 @@ TEST(triangulation, twoPoses) {
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2;
measurements += z1, z2;
poses += kPose1, kPose2;
measurements += kZ1, kZ2;
double rank_tol = 1e-9;
// 1. Test simple DLT, perfect in no noise situation
bool optimize = false;
boost::optional<Point3> actual1 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
// 2. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
// 3. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -86,16 +87,79 @@ TEST(triangulation, twoPoses) {
measurements.at(1) += Point2(-0.2, 0.3);
optimize = false;
boost::optional<Point3> actual3 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4));
// 4. Now with optimization on
optimize = true;
boost::optional<Point3> actual4 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, rank_tol, optimize);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
}
TEST(triangulation, twoCamerasUsingLOST) {
CameraSet<PinholeCamera<Cal3_S2>> cameras;
cameras.push_back(kCamera1);
cameras.push_back(kCamera2);
Point2Vector measurements = {kZ1, kZ2};
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4);
double rank_tol = 1e-9;
// 1. Test simple triangulation, perfect in no noise situation
boost::optional<Point3> actual1 = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(cameras, measurements, rank_tol,
/*optimize=*/false,
measurementNoise,
/*useLOST=*/true);
EXPECT(assert_equal(kLandmark, *actual1, 1e-12));
// 2. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
measurements[0] += Point2(0.1, 0.5);
measurements[1] += Point2(-0.2, 0.3);
boost::optional<Point3> actual2 = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(
cameras, measurements, rank_tol,
/*optimize=*/false, measurementNoise,
/*useLOST=*/true);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual2, 1e-4));
}
TEST(triangulation, twoCamerasLOSTvsDLT) {
// LOST has been shown to perform better when the point is much closer to one
// camera compared to another. This unit test checks this configuration.
const Cal3_S2 identityK;
const Pose3 pose1;
const Pose3 pose2(Rot3(), Point3(5., 0., -5.));
CameraSet<PinholeCamera<Cal3_S2>> cameras;
cameras.emplace_back(pose1, identityK);
cameras.emplace_back(pose2, identityK);
Point3 landmark(0, 0, 1);
Point2 x1_noisy = cameras[0].project(landmark) + Point2(0.00817, 0.00977);
Point2 x2_noisy = cameras[1].project(landmark) + Point2(-0.00610, 0.01969);
Point2Vector measurements = {x1_noisy, x2_noisy};
const double rank_tol = 1e-9;
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2);
boost::optional<Point3> estimateLOST = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(cameras, measurements, rank_tol,
/*optimize=*/false,
measurementNoise,
/*useLOST=*/true);
// These values are from a MATLAB implementation.
EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), *estimateLOST, 1e-3));
boost::optional<Point3> estimateDLT =
triangulatePoint3<Cal3_S2>(cameras, measurements, rank_tol, false);
// The LOST estimate should have a smaller error.
EXPECT((landmark - *estimateLOST).norm() <= (landmark - *estimateDLT).norm());
}
//******************************************************************************
// Simple test with a well-behaved two camera situation with Cal3DS2 calibration.
TEST(triangulation, twoPosesCal3DS2) {
@ -103,18 +167,18 @@ TEST(triangulation, twoPosesCal3DS2) {
boost::make_shared<Cal3DS2>(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001,
-0.0003);
PinholeCamera<Cal3DS2> camera1Distorted(pose1, *sharedDistortedCal);
PinholeCamera<Cal3DS2> camera1Distorted(kPose1, *sharedDistortedCal);
PinholeCamera<Cal3DS2> camera2Distorted(pose2, *sharedDistortedCal);
PinholeCamera<Cal3DS2> camera2Distorted(kPose2, *sharedDistortedCal);
// 0. Project two landmarks into two cameras and triangulate
Point2 z1Distorted = camera1Distorted.project(landmark);
Point2 z2Distorted = camera2Distorted.project(landmark);
Point2 z1Distorted = camera1Distorted.project(kLandmark);
Point2 z2Distorted = camera2Distorted.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2;
poses += kPose1, kPose2;
measurements += z1Distorted, z2Distorted;
double rank_tol = 1e-9;
@ -124,14 +188,14 @@ TEST(triangulation, twoPosesCal3DS2) {
boost::optional<Point3> actual1 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
// 2. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3DS2>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
// 3. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -160,18 +224,18 @@ TEST(triangulation, twoPosesFisheye) {
boost::make_shared<Calibration>(1500, 1200, .1, 640, 480, -.3, 0.1,
0.0001, -0.0003);
PinholeCamera<Calibration> camera1Distorted(pose1, *sharedDistortedCal);
PinholeCamera<Calibration> camera1Distorted(kPose1, *sharedDistortedCal);
PinholeCamera<Calibration> camera2Distorted(pose2, *sharedDistortedCal);
PinholeCamera<Calibration> camera2Distorted(kPose2, *sharedDistortedCal);
// 0. Project two landmarks into two cameras and triangulate
Point2 z1Distorted = camera1Distorted.project(landmark);
Point2 z2Distorted = camera2Distorted.project(landmark);
Point2 z1Distorted = camera1Distorted.project(kLandmark);
Point2 z2Distorted = camera2Distorted.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2;
poses += kPose1, kPose2;
measurements += z1Distorted, z2Distorted;
double rank_tol = 1e-9;
@ -181,14 +245,14 @@ TEST(triangulation, twoPosesFisheye) {
boost::optional<Point3> actual1 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
// 2. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<Calibration>(poses, sharedDistortedCal, measurements,
rank_tol, optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
// 3. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -213,17 +277,17 @@ TEST(triangulation, twoPosesFisheye) {
TEST(triangulation, twoPosesBundler) {
boost::shared_ptr<Cal3Bundler> bundlerCal = //
boost::make_shared<Cal3Bundler>(1500, 0.1, 0.2, 640, 480);
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);
PinholeCamera<Cal3Bundler> camera1(kPose1, *bundlerCal);
PinholeCamera<Cal3Bundler> camera2(kPose2, *bundlerCal);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
Point2 z1 = camera1.project(kLandmark);
Point2 z2 = camera2.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2;
poses += kPose1, kPose2;
measurements += z1, z2;
bool optimize = true;
@ -232,7 +296,7 @@ TEST(triangulation, twoPosesBundler) {
boost::optional<Point3> actual = //
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol,
optimize);
EXPECT(assert_equal(landmark, *actual, 1e-7));
EXPECT(assert_equal(kLandmark, *actual, 1e-7));
// Add some noise and try again
measurements.at(0) += Point2(0.1, 0.5);
@ -249,12 +313,12 @@ TEST(triangulation, fourPoses) {
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2;
measurements += z1, z2;
poses += kPose1, kPose2;
measurements += kZ1, kZ2;
boost::optional<Point3> actual =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
// 2. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -262,37 +326,37 @@ TEST(triangulation, fourPoses) {
measurements.at(1) += Point2(-0.2, 0.3);
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *actual2, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
EXPECT(assert_equal(kLandmark, *actual2, 1e-2));
// 3. Add a slightly rotated third camera above, again with measurement noise
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
Point2 z3 = camera3.project(landmark);
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
Point2 z3 = camera3.project(kLandmark);
poses += pose3;
measurements += z3 + Point2(0.1, -0.1);
boost::optional<Point3> triangulated_3cameras = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2));
// Again with nonlinear optimization
boost::optional<Point3> triangulated_3cameras_opt =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2));
// 4. Test failure: Add a 4th camera facing the wrong way
Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
PinholeCamera<Cal3_S2> camera4(pose4, *sharedCal);
PinholeCamera<Cal3_S2> camera4(pose4, *kSharedCal);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
CHECK_EXCEPTION(camera4.project(landmark), CheiralityException);
CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException);
poses += pose4;
measurements += Point2(400, 400);
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
TriangulationCheiralityException);
#endif
}
@ -300,33 +364,33 @@ TEST(triangulation, fourPoses) {
//******************************************************************************
TEST(triangulation, threePoses_robustNoiseModel) {
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
Point2 z3 = camera3.project(landmark);
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
Point2 z3 = camera3.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose2, pose3;
measurements += z1, z2, z3;
poses += kPose1, kPose2, pose3;
measurements += kZ1, kZ2, z3;
// noise free, so should give exactly the landmark
boost::optional<Point3> actual =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
// Add outlier
measurements.at(0) += Point2(100, 120); // very large pixel noise!
// now estimate does not match landmark
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
// DLT is surprisingly robust, but still off (actual error is around 0.26m):
EXPECT( (landmark - *actual2).norm() >= 0.2);
EXPECT( (landmark - *actual2).norm() <= 0.5);
EXPECT( (kLandmark - *actual2).norm() >= 0.2);
EXPECT( (kLandmark - *actual2).norm() <= 0.5);
// Again with nonlinear optimization
boost::optional<Point3> actual3 =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
// result from nonlinear (but non-robust optimization) is close to DLT and still off
EXPECT(assert_equal(*actual2, *actual3, 0.1));
@ -334,27 +398,27 @@ TEST(triangulation, threePoses_robustNoiseModel) {
auto model = noiseModel::Robust::Create(
noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2));
boost::optional<Point3> actual4 = triangulatePoint3<Cal3_S2>(
poses, sharedCal, measurements, 1e-9, true, model);
poses, kSharedCal, measurements, 1e-9, true, model);
// using the Huber loss we now have a quite small error!! nice!
EXPECT(assert_equal(landmark, *actual4, 0.05));
EXPECT(assert_equal(kLandmark, *actual4, 0.05));
}
//******************************************************************************
TEST(triangulation, fourPoses_robustNoiseModel) {
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *sharedCal);
Point2 z3 = camera3.project(landmark);
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
PinholeCamera<Cal3_S2> camera3(pose3, *kSharedCal);
Point2 z3 = camera3.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose1, pose2, pose3; // 2 measurements from pose 1
measurements += z1, z1, z2, z3;
poses += kPose1, kPose1, kPose2, pose3; // 2 measurements from pose 1
measurements += kZ1, kZ1, kZ2, z3;
// noise free, so should give exactly the landmark
boost::optional<Point3> actual =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
// Add outlier
measurements.at(0) += Point2(100, 120); // very large pixel noise!
@ -365,14 +429,14 @@ TEST(triangulation, fourPoses_robustNoiseModel) {
// now estimate does not match landmark
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements);
// DLT is surprisingly robust, but still off (actual error is around 0.17m):
EXPECT( (landmark - *actual2).norm() >= 0.1);
EXPECT( (landmark - *actual2).norm() <= 0.5);
EXPECT( (kLandmark - *actual2).norm() >= 0.1);
EXPECT( (kLandmark - *actual2).norm() <= 0.5);
// Again with nonlinear optimization
boost::optional<Point3> actual3 =
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements, 1e-9, true);
// result from nonlinear (but non-robust optimization) is close to DLT and still off
EXPECT(assert_equal(*actual2, *actual3, 0.1));
@ -380,24 +444,24 @@ TEST(triangulation, fourPoses_robustNoiseModel) {
auto model = noiseModel::Robust::Create(
noiseModel::mEstimator::Huber::Create(1.345), noiseModel::Unit::Create(2));
boost::optional<Point3> actual4 = triangulatePoint3<Cal3_S2>(
poses, sharedCal, measurements, 1e-9, true, model);
poses, kSharedCal, measurements, 1e-9, true, model);
// using the Huber loss we now have a quite small error!! nice!
EXPECT(assert_equal(landmark, *actual4, 0.05));
EXPECT(assert_equal(kLandmark, *actual4, 0.05));
}
//******************************************************************************
TEST(triangulation, fourPoses_distinct_Ks) {
Cal3_S2 K1(1500, 1200, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
PinholeCamera<Cal3_S2> camera1(pose1, K1);
PinholeCamera<Cal3_S2> camera1(kPose1, K1);
// create second camera 1 meter to the right of first camera
Cal3_S2 K2(1600, 1300, 0, 650, 440);
PinholeCamera<Cal3_S2> camera2(pose2, K2);
PinholeCamera<Cal3_S2> camera2(kPose2, K2);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
Point2 z1 = camera1.project(kLandmark);
Point2 z2 = camera2.project(kLandmark);
CameraSet<PinholeCamera<Cal3_S2>> cameras;
Point2Vector measurements;
@ -407,7 +471,7 @@ TEST(triangulation, fourPoses_distinct_Ks) {
boost::optional<Point3> actual = //
triangulatePoint3<Cal3_S2>(cameras, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
// 2. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -416,25 +480,25 @@ TEST(triangulation, fourPoses_distinct_Ks) {
boost::optional<Point3> actual2 = //
triangulatePoint3<Cal3_S2>(cameras, measurements);
EXPECT(assert_equal(landmark, *actual2, 1e-2));
EXPECT(assert_equal(kLandmark, *actual2, 1e-2));
// 3. Add a slightly rotated third camera above, again with measurement noise
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Cal3_S2 K3(700, 500, 0, 640, 480);
PinholeCamera<Cal3_S2> camera3(pose3, K3);
Point2 z3 = camera3.project(landmark);
Point2 z3 = camera3.project(kLandmark);
cameras += camera3;
measurements += z3 + Point2(0.1, -0.1);
boost::optional<Point3> triangulated_3cameras = //
triangulatePoint3<Cal3_S2>(cameras, measurements);
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
EXPECT(assert_equal(kLandmark, *triangulated_3cameras, 1e-2));
// Again with nonlinear optimization
boost::optional<Point3> triangulated_3cameras_opt =
triangulatePoint3<Cal3_S2>(cameras, measurements, 1e-9, true);
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
EXPECT(assert_equal(kLandmark, *triangulated_3cameras_opt, 1e-2));
// 4. Test failure: Add a 4th camera facing the wrong way
Pose3 pose4 = Pose3(Rot3::Ypr(M_PI / 2, 0., -M_PI / 2), Point3(0, 0, 1));
@ -442,7 +506,7 @@ TEST(triangulation, fourPoses_distinct_Ks) {
PinholeCamera<Cal3_S2> camera4(pose4, K4);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
CHECK_EXCEPTION(camera4.project(landmark), CheiralityException);
CHECK_EXCEPTION(camera4.project(kLandmark), CheiralityException);
cameras += camera4;
measurements += Point2(400, 400);
@ -455,15 +519,15 @@ TEST(triangulation, fourPoses_distinct_Ks) {
TEST(triangulation, fourPoses_distinct_Ks_distortion) {
Cal3DS2 K1(1500, 1200, 0, 640, 480, -.3, 0.1, 0.0001, -0.0003);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
PinholeCamera<Cal3DS2> camera1(pose1, K1);
PinholeCamera<Cal3DS2> camera1(kPose1, K1);
// create second camera 1 meter to the right of first camera
Cal3DS2 K2(1600, 1300, 0, 650, 440, -.2, 0.05, 0.0002, -0.0001);
PinholeCamera<Cal3DS2> camera2(pose2, K2);
PinholeCamera<Cal3DS2> camera2(kPose2, K2);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
Point2 z1 = camera1.project(kLandmark);
Point2 z2 = camera2.project(kLandmark);
CameraSet<PinholeCamera<Cal3DS2>> cameras;
Point2Vector measurements;
@ -473,22 +537,22 @@ TEST(triangulation, fourPoses_distinct_Ks_distortion) {
boost::optional<Point3> actual = //
triangulatePoint3<Cal3DS2>(cameras, measurements);
EXPECT(assert_equal(landmark, *actual, 1e-2));
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
}
//******************************************************************************
TEST(triangulation, outliersAndFarLandmarks) {
Cal3_S2 K1(1500, 1200, 0, 640, 480);
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
PinholeCamera<Cal3_S2> camera1(pose1, K1);
PinholeCamera<Cal3_S2> camera1(kPose1, K1);
// create second camera 1 meter to the right of first camera
Cal3_S2 K2(1600, 1300, 0, 650, 440);
PinholeCamera<Cal3_S2> camera2(pose2, K2);
PinholeCamera<Cal3_S2> camera2(kPose2, K2);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z2 = camera2.project(landmark);
Point2 z1 = camera1.project(kLandmark);
Point2 z2 = camera2.project(kLandmark);
CameraSet<PinholeCamera<Cal3_S2>> cameras;
Point2Vector measurements;
@ -501,7 +565,7 @@ TEST(triangulation, outliersAndFarLandmarks) {
1.0, false, landmarkDistanceThreshold); // all default except
// landmarkDistanceThreshold
TriangulationResult actual = triangulateSafe(cameras, measurements, params);
EXPECT(assert_equal(landmark, *actual, 1e-2));
EXPECT(assert_equal(kLandmark, *actual, 1e-2));
EXPECT(actual.valid());
landmarkDistanceThreshold = 4; // landmark is farther than that
@ -513,10 +577,10 @@ TEST(triangulation, outliersAndFarLandmarks) {
// 3. Add a slightly rotated third camera above with a wrong measurement
// (OUTLIER)
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Pose3 pose3 = kPose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
Cal3_S2 K3(700, 500, 0, 640, 480);
PinholeCamera<Cal3_S2> camera3(pose3, K3);
Point2 z3 = camera3.project(landmark);
Point2 z3 = camera3.project(kLandmark);
cameras += camera3;
measurements += z3 + Point2(10, -10);
@ -539,18 +603,18 @@ TEST(triangulation, outliersAndFarLandmarks) {
//******************************************************************************
TEST(triangulation, twoIdenticalPoses) {
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
PinholeCamera<Cal3_S2> camera1(kPose1, *kSharedCal);
// 1. Project two landmarks into two cameras and triangulate
Point2 z1 = camera1.project(landmark);
Point2 z1 = camera1.project(kLandmark);
vector<Pose3> poses;
Point2Vector measurements;
poses += pose1, pose1;
poses += kPose1, kPose1;
measurements += z1, z1;
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
TriangulationUnderconstrainedException);
}
@ -565,7 +629,7 @@ TEST(triangulation, onePose) {
poses += Pose3();
measurements += Point2(0, 0);
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, kSharedCal, measurements),
TriangulationUnderconstrainedException);
}
@ -681,12 +745,12 @@ TEST(triangulation, twoPoses_sphericalCamera) {
std::vector<Unit3> measurements;
// Project landmark into two cameras and triangulate
SphericalCamera cam1(pose1);
SphericalCamera cam2(pose2);
Unit3 u1 = cam1.project(landmark);
Unit3 u2 = cam2.project(landmark);
SphericalCamera cam1(kPose1);
SphericalCamera cam2(kPose2);
Unit3 u1 = cam1.project(kLandmark);
Unit3 u2 = cam2.project(kLandmark);
poses += pose1, pose2;
poses += kPose1, kPose2;
measurements += u1, u2;
CameraSet<SphericalCamera> cameras;
@ -698,25 +762,25 @@ TEST(triangulation, twoPoses_sphericalCamera) {
// 1. Test linear triangulation via DLT
auto projection_matrices = projectionMatricesFromCameras(cameras);
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
EXPECT(assert_equal(landmark, point, 1e-7));
EXPECT(assert_equal(kLandmark, point, 1e-7));
// 2. Test nonlinear triangulation
point = triangulateNonlinear<SphericalCamera>(cameras, measurements, point);
EXPECT(assert_equal(landmark, point, 1e-7));
EXPECT(assert_equal(kLandmark, point, 1e-7));
// 3. Test simple DLT, now within triangulatePoint3
bool optimize = false;
boost::optional<Point3> actual1 = //
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
optimize);
EXPECT(assert_equal(landmark, *actual1, 1e-7));
EXPECT(assert_equal(kLandmark, *actual1, 1e-7));
// 4. test with optimization on, same answer
optimize = true;
boost::optional<Point3> actual2 = //
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
optimize);
EXPECT(assert_equal(landmark, *actual2, 1e-7));
EXPECT(assert_equal(kLandmark, *actual2, 1e-7));
// 5. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
@ -761,7 +825,7 @@ TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) {
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2,
1e-7)); // behind and to the right of PoseB
poses += pose1, pose2;
poses += kPose1, kPose2;
measurements += u1, u2;
CameraSet<SphericalCamera> cameras;

View File

@ -14,6 +14,7 @@
* @brief Functions for triangulation
* @date July 31, 2013
* @author Chris Beall
* @author Akshay Krishnan
*/
#include <gtsam/geometry/triangulation.h>
@ -24,9 +25,9 @@
namespace gtsam {
Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const Point2Vector& measurements, double rank_tol) {
// number of cameras
size_t m = projection_matrices.size();
@ -39,6 +40,11 @@ Vector4 triangulateHomogeneousDLT(
const Point2& p = measurements.at(i);
// build system of equations
// [A_1; A_2; A_3] x = [b_1; b_2; b_3]
// [b_3 * A_1 - b_1 * A_3] x = 0
// [b_3 * A_2 - b_2 * A_3] x = 0
// A' x = 0
// A' 2x4 = [b_3 * A_1 - b_1 * A_3; b_3 * A_2 - b_2 * A_3]
A.row(row) = p.x() * projection.row(2) - projection.row(0);
A.row(row + 1) = p.y() * projection.row(2) - projection.row(1);
}
@ -47,16 +53,15 @@ Vector4 triangulateHomogeneousDLT(
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3)
throw(TriangulationUnderconstrainedException());
if (rank < 3) throw(TriangulationUnderconstrainedException());
return v;
}
Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol) {
// number of cameras
size_t m = projection_matrices.size();
@ -66,7 +71,9 @@ Vector4 triangulateHomogeneousDLT(
for (size_t i = 0; i < m; i++) {
size_t row = i * 2;
const Matrix34& projection = projection_matrices.at(i);
const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
const Point3& p =
measurements.at(i)
.point3(); // to get access to x,y,z of the bearing vector
// build system of equations
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
@ -77,34 +84,67 @@ Vector4 triangulateHomogeneousDLT(
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3)
throw(TriangulationUnderconstrainedException());
if (rank < 3) throw(TriangulationUnderconstrainedException());
return v;
}
Point3 triangulateDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const Point2Vector& measurements, double rank_tol) {
Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise) {
size_t m = calibratedMeasurements.size();
assert(m == poses.size());
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
rank_tol);
// Construct the system matrices.
Matrix A = Matrix::Zero(m * 2, 3);
Matrix b = Matrix::Zero(m * 2, 1);
for (size_t i = 0; i < m; i++) {
const Pose3& wTi = poses[i];
// TODO(akshay-krishnan): are there better ways to select j?
const int j = (i + 1) % m;
const Pose3& wTj = poses[j];
const Point3 d_ij = wTj.translation() - wTi.translation();
const Point3 wZi = wTi.rotation().rotate(calibratedMeasurements[i]);
const Point3 wZj = wTj.rotation().rotate(calibratedMeasurements[j]);
// Note: Setting q_i = 1.0 gives same results as DLT.
const double q_i = wZi.cross(wZj).norm() /
(measurementNoise->sigma() * d_ij.cross(wZj).norm());
const Matrix23 coefficientMat =
q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) *
wTi.rotation().matrix().transpose();
A.block<2, 3>(2 * i, 0) << coefficientMat;
b.block<2, 1>(2 * i, 0) << coefficientMat * wTi.translation();
}
return A.colPivHouseholderQr().solve(b);
}
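Reading the loop above as equations (a restatement of the code, not of the paper's derivation): with world-frame rotation R_i, translation t_i, calibrated measurement z_i, and j = (i + 1) mod m as in the code, each camera contributes the two rows

\[
q_i \, [z_i]_\times^{(1:2)} \, R_i^\top (x - t_i) = 0,
\qquad
q_i = \frac{\lVert (R_i z_i) \times (R_j z_j) \rVert}{\sigma \, \lVert (t_j - t_i) \times (R_j z_j) \rVert},
\]

where [z_i]_x^{(1:2)} denotes the top two rows of the skew-symmetric matrix of z_i (the topLeftCorner(2, 3) above) and sigma is the isotropic measurement sigma. Stacking all cameras gives the 2m x 3 system A x = b that the final colPivHouseholderQr() call solves in least squares.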
Point3 triangulateDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const Point2Vector& measurements, double rank_tol) {
Vector4 v =
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
// Create 3D point from homogeneous coordinates
return Point3(v.head<3>() / v[3]);
}
Point3 triangulateDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol) {
// contrary to previous triangulateDLT, this is now taking Unit3 inputs
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
rank_tol);
// Create 3D point from homogeneous coordinates
return Point3(v.head<3>() / v[3]);
Vector4 v =
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
// Create 3D point from homogeneous coordinates
return Point3(v.head<3>() / v[3]);
}
///
/**
* Optimize for triangulation
* @param graph nonlinear factors for projection
@ -132,4 +172,4 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
return result.at<Point3>(landmarkKey);
}
} // \namespace gtsam
} // namespace gtsam

View File

@ -15,6 +15,7 @@
* @date July 31, 2013
* @author Chris Beall
* @author Luca Carlone
* @author Akshay Krishnan
*/
#pragma once
@ -94,6 +95,20 @@ GTSAM_EXPORT Point3 triangulateDLT(
const std::vector<Unit3>& measurements,
double rank_tol = 1e-9);
/**
* @brief Triangulation using the LOST (Linear Optimal Sine Triangulation)
* algorithm proposed in https://arxiv.org/pdf/2205.12197.pdf by Sebastien Henry
* and John Christian.
* @param poses camera poses in world frame
* @param calibratedMeasurements measurements in homogeneous coordinates in each
* camera pose
* @param measurementNoise isotropic noise model for the measurements
* @return triangulated point in world coordinates
*/
GTSAM_EXPORT Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise);
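A minimal sketch of calling this function directly; only the signature comes from the declaration above, while the poses, measurements, and sigma are illustrative values chosen to be consistent with a landmark at (0.5, 0, 5).

#include <gtsam/geometry/triangulation.h>

using namespace gtsam;

void lostSketch() {
  // Camera 0 at the origin, camera 1 one meter to its right, both axis-aligned.
  std::vector<Pose3> poses = {Pose3(), Pose3(Rot3(), Point3(1, 0, 0))};

  // Calibrated measurements are homogeneous image-plane coordinates (z = 1);
  // these correspond to a landmark at (0.5, 0, 5) seen by both cameras.
  Point3Vector calibratedMeasurements = {Point3(0.1, 0.0, 1.0),
                                         Point3(-0.1, 0.0, 1.0)};

  // Isotropic image-plane noise, used only to form the LOST weights.
  SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-3);

  Point3 estimate =
      triangulateLOST(poses, calibratedMeasurements, measurementNoise);
  // For this noise-free setup the estimate should recover ~(0.5, 0, 5).
}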
/**
* Create a factor graph with projection factors from poses and one calibration
* @param poses Camera poses
@ -301,10 +316,10 @@ template <class CAMERA>
typename CAMERA::MeasurementVector undistortMeasurements(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements) {
const size_t num_meas = cameras.size();
assert(num_meas == measurements.size());
typename CAMERA::MeasurementVector undistortedMeasurements(num_meas);
for (size_t ii = 0; ii < num_meas; ++ii) {
const size_t nrMeasurements = measurements.size();
assert(nrMeasurements == cameras.size());
typename CAMERA::MeasurementVector undistortedMeasurements(nrMeasurements);
for (size_t ii = 0; ii < nrMeasurements; ++ii) {
// Calibrate with cal and uncalibrate with pinhole version of cal so that
// measurements are undistorted.
undistortedMeasurements[ii] =
@ -330,6 +345,65 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
return measurements;
}
/** Convert pixel measurements in image to homogeneous measurements in the image
* plane using shared camera intrinsics.
*
* @tparam CALIBRATION Calibration type to use.
* @param cal Calibration with which measurements were taken.
* @param measurements Vector of measurements to undistort.
* @return homogeneous measurements in image plane
*/
template <class CALIBRATION>
inline Point3Vector calibrateMeasurementsShared(
const CALIBRATION& cal, const Point2Vector& measurements) {
Point3Vector calibratedMeasurements;
// Calibrate with cal and uncalibrate with pinhole version of cal so that
// measurements are undistorted.
std::transform(measurements.begin(), measurements.end(),
std::back_inserter(calibratedMeasurements),
[&cal](const Point2& measurement) {
Point3 p;
p << cal.calibrate(measurement), 1.0;
return p;
});
return calibratedMeasurements;
}
/** Convert pixel measurements in image to homogeneous measurements in the image
* plane using camera intrinsics of each measurement.
*
* @tparam CAMERA Camera type to use.
* @param cameras Cameras corresponding to each measurement.
* @param measurements Vector of measurements to undistort.
* @return homogeneous measurements in image plane
*/
template <class CAMERA>
inline Point3Vector calibrateMeasurements(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements) {
const size_t nrMeasurements = measurements.size();
assert(nrMeasurements == cameras.size());
Point3Vector calibratedMeasurements(nrMeasurements);
for (size_t ii = 0; ii < nrMeasurements; ++ii) {
calibratedMeasurements[ii]
<< cameras[ii].calibration().calibrate(measurements[ii]),
1.0;
}
return calibratedMeasurements;
}
/** Specialize for SphericalCamera to do nothing. */
template <class CAMERA = SphericalCamera>
inline Point3Vector calibrateMeasurements(
const CameraSet<SphericalCamera>& cameras,
const SphericalCamera::MeasurementVector& measurements) {
Point3Vector calibratedMeasurements(measurements.size());
for (size_t ii = 0; ii < measurements.size(); ++ii) {
calibratedMeasurements[ii] << measurements[ii].point3();
}
return calibratedMeasurements;
}
/**
* Function to triangulate 3D landmark point from an arbitrary number
* of poses (at least 2) using the DLT. The function checks that the
@ -340,41 +414,55 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
* @param measurements A vector of camera measurements
* @param rank_tol rank tolerance, default 1e-9
* @param optimize Flag to turn on nonlinear refinement of triangulation
* @param useLOST whether to use the LOST algorithm instead of DLT
* @return Returns a Point3
*/
template<class CALIBRATION>
template <class CALIBRATION>
Point3 triangulatePoint3(const std::vector<Pose3>& poses,
boost::shared_ptr<CALIBRATION> sharedCal,
const Point2Vector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
boost::shared_ptr<CALIBRATION> sharedCal,
const Point2Vector& measurements,
double rank_tol = 1e-9, bool optimize = false,
const SharedNoiseModel& model = nullptr,
const bool useLOST = false) {
assert(poses.size() == measurements.size());
if (poses.size() < 2)
throw(TriangulationUnderconstrainedException());
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
if (poses.size() < 2) throw(TriangulationUnderconstrainedException());
// Triangulate linearly
Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
Point3 point;
if (useLOST) {
// Reduce input noise model to an isotropic noise model using the mean of
// the diagonal.
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
SharedIsotropic measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);
// Calibrate the measurements to obtain homogeneous coordinates in image
// plane.
auto calibratedMeasurements =
calibrateMeasurementsShared<CALIBRATION>(*sharedCal, measurements);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
}
// Then refine using non-linear optimization
if (optimize)
point = triangulateNonlinear<CALIBRATION> //
point = triangulateNonlinear<CALIBRATION> //
(poses, sharedCal, measurements, point, model);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras
for(const Pose3& pose: poses) {
for (const Pose3& pose : poses) {
const Point3& p_local = pose.transformTo(point);
if (p_local.z() <= 0)
throw(TriangulationCheiralityException());
if (p_local.z() <= 0) throw(TriangulationCheiralityException());
}
#endif
@ -391,41 +479,63 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
* @param measurements A vector of camera measurements
* @param rank_tol rank tolerance, default 1e-9
* @param optimize Flag to turn on nonlinear refinement of triangulation
* @param useLOST whether to use the LOST algorithm instead of
* DLT
* @return Returns a Point3
*/
template<class CAMERA>
Point3 triangulatePoint3(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
template <class CAMERA>
Point3 triangulatePoint3(const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements,
double rank_tol = 1e-9, bool optimize = false,
const SharedNoiseModel& model = nullptr,
const bool useLOST = false) {
size_t m = cameras.size();
assert(measurements.size() == m);
if (m < 2)
throw(TriangulationUnderconstrainedException());
if (m < 2) throw(TriangulationUnderconstrainedException());
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromCameras(cameras);
// Triangulate linearly
Point3 point;
if (useLOST) {
// Reduce input noise model to an isotropic noise model using the mean of
// the diagonal.
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
SharedIsotropic measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);
// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CAMERA>(cameras, measurements);
// construct poses from cameras.
std::vector<Pose3> poses;
poses.reserve(cameras.size());
for (const auto& camera : cameras) poses.push_back(camera.pose());
Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
// Calibrate the measurements to obtain homogeneous coordinates in image
// plane.
auto calibratedMeasurements =
calibrateMeasurements<CAMERA>(cameras, measurements);
// The n refine using non-linear optimization
if (optimize)
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromCameras(cameras);
// Undistort the measurements, leaving only the pinhole elements in effect.
auto undistortedMeasurements =
undistortMeasurements<CAMERA>(cameras, measurements);
point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
}
// Then refine using non-linear optimization
if (optimize) {
point = triangulateNonlinear<CAMERA>(cameras, measurements, point, model);
}
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras
for(const CAMERA& camera: cameras) {
for (const CAMERA& camera : cameras) {
const Point3& p_local = camera.pose().transformTo(point);
if (p_local.z() <= 0)
throw(TriangulationCheiralityException());
if (p_local.z() <= 0) throw(TriangulationCheiralityException());
}
#endif
@ -433,14 +543,14 @@ Point3 triangulatePoint3(
}
/// Pinhole-specific version
template<class CALIBRATION>
Point3 triangulatePoint3(
const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
const Point2Vector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
return triangulatePoint3<PinholeCamera<CALIBRATION> > //
(cameras, measurements, rank_tol, optimize, model);
template <class CALIBRATION>
Point3 triangulatePoint3(const CameraSet<PinholeCamera<CALIBRATION>>& cameras,
const Point2Vector& measurements,
double rank_tol = 1e-9, bool optimize = false,
const SharedNoiseModel& model = nullptr,
const bool useLOST = false) {
return triangulatePoint3<PinholeCamera<CALIBRATION>> //
(cameras, measurements, rank_tol, optimize, model, useLOST);
}
struct GTSAM_EXPORT TriangulationParameters {