update triangulation LOST API

release/4.3a0
Akshay Krishnan 2022-07-10 16:49:15 -07:00
parent ca4b450e7e
commit 63e0446f0b
4 changed files with 247 additions and 194 deletions

View File

@ -33,6 +33,7 @@ namespace gtsam {
/// As of GTSAM 4, in order to make GTSAM more lean,
/// it is now possible to just typedef Point3 to Vector3
typedef Vector3 Point3;
typedef std::vector<Point3, Eigen::aligned_allocator<Point3> > Point3Vector;
// Convenience typedef
using Point3Pair = std::pair<Point3, Point3>;

View File

@ -96,23 +96,33 @@ TEST(triangulation, twoPoses) {
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
}
TEST(triangulation, twoCamerasLOST) {
std::vector<PinholeCamera<Cal3_S2>> cameras = {camera1, camera2};
std::vector<Point2> measurements = {z1, z2};
TEST(triangulation, twoCamerasUsingLOST) {
CameraSet<PinholeCamera<Cal3_S2>> cameras;
cameras.push_back(camera1);
cameras.push_back(camera2);
Point2Vector measurements = {z1, z2};
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4);
double rank_tol = 1e-9;
// 1. Test simple triangulation: perfect in the no-noise situation
Point3 actual1 = //
triangulateLOSTPoint3<Cal3_S2>(cameras, measurements);
EXPECT(assert_equal(landmark, actual1, 1e-12));
boost::optional<Point3> actual1 = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(
cameras, measurements, rank_tol,
/*optimize=*/false, measurementNoise,
/*use_lost_triangulation=*/true);
EXPECT(assert_equal(landmark, *actual1, 1e-12));
// 2. Add some noise and try again: result should be ~ (4.995,
// 0.499167, 1.19814)
measurements[0] += Point2(0.1, 0.5);
measurements[1] += Point2(-0.2, 0.3);
const double measurement_sigma = 1e-3;
Point3 actual2 = //
triangulateLOSTPoint3<Cal3_S2>(cameras, measurements, measurement_sigma);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), actual2, 1e-4));
boost::optional<Point3> actual2 = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(
cameras, measurements, rank_tol,
/*optimize=*/false, measurementNoise,
/*use_lost_triangulation=*/true);
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual2, 1e-4));
}
TEST(triangulation, twoCamerasLOSTvsDLT) {
@ -121,33 +131,32 @@ TEST(triangulation, twoCamerasLOSTvsDLT) {
Cal3_S2 identity_K;
Pose3 pose_1;
Pose3 pose_2(Rot3(), Point3(5., 0., -5.));
PinholeCamera<Cal3_S2> camera_1(pose_1, identity_K);
PinholeCamera<Cal3_S2> camera_2(pose_2, identity_K);
CameraSet<PinholeCamera<Cal3_S2>> cameras;
cameras.emplace_back(pose_1, identity_K);
cameras.emplace_back(pose_2, identity_K);
Point3 gt_point(0, 0, 1);
Point2 x1 = camera_1.project(gt_point);
Point2 x2 = camera_2.project(gt_point);
Point2 x1_noisy = x1 + Point2(0.00817, 0.00977);
Point2 x2_noisy = x2 + Point2(-0.00610, 0.01969);
const double measurement_sigma = 1e-2;
Point3 estimate_lost = triangulateLOSTPoint3<Cal3_S2>(
{camera_1, camera_2}, {x1_noisy, x2_noisy}, measurement_sigma);
// These values are from a MATLAB implementation.
EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), estimate_lost, 1e-3));
Point2 x1_noisy = cameras[0].project(gt_point) + Point2(0.00817, 0.00977);
Point2 x2_noisy = cameras[1].project(gt_point) + Point2(-0.00610, 0.01969);
Point2Vector measurements = {x1_noisy, x2_noisy};
double rank_tol = 1e-9;
SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2);
boost::optional<Point3> estimate_lost = //
triangulatePoint3<PinholeCamera<Cal3_S2>>(
cameras, measurements, rank_tol,
/*optimize=*/false, measurementNoise,
/*use_lost_triangulation=*/true);
// These values are from a MATLAB implementation.
EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), *estimate_lost, 1e-3));
Pose3Vector poses = {pose_1, pose_2};
Point2Vector points = {x1_noisy, x2_noisy};
boost::shared_ptr<Cal3_S2> cal = boost::make_shared<Cal3_S2>(identity_K);
boost::optional<Point3> estimate_dlt =
triangulatePoint3<Cal3_S2>(poses, cal, points, rank_tol, false);
triangulatePoint3<Cal3_S2>(cameras, measurements, rank_tol, false);
// The LOST estimate should have a smaller error.
EXPECT((gt_point - estimate_lost).norm() <=
EXPECT((gt_point - *estimate_lost).norm() <=
(gt_point - *estimate_dlt).norm());
}

View File

@ -24,9 +24,9 @@
namespace gtsam {
Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const Point2Vector& measurements, double rank_tol) {
// number of cameras
size_t m = projection_matrices.size();
@ -52,61 +52,75 @@ Vector4 triangulateHomogeneousDLT(
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3)
throw(TriangulationUnderconstrainedException());
if (rank < 3) throw(TriangulationUnderconstrainedException());
return v;
}
Vector3 triangulateLOSTHomogeneous(
const std::vector<Pose3>& poses,
const std::vector<Point3>& calibrated_measurements,
const double measurement_sigma) {
size_t m = calibrated_measurements.size();
assert(m == poses.size());
Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol) {
// number of cameras
size_t m = projection_matrices.size();
// Construct the system matrices.
// Allocate DLT matrix
Matrix A = Matrix::Zero(m * 2, 4);
for (size_t i = 0; i < m; i++) {
const Pose3& wTi = poses[i];
// TODO(akshay-krishnan): are there better ways to select j?
const int j = (i + 1) % m;
const Pose3& wTj = poses[j];
size_t row = i * 2;
const Matrix34& projection = projection_matrices.at(i);
const Point3& p =
measurements.at(i)
.point3(); // to get access to x,y,z of the bearing vector
const Point3 d_ij = wTj.translation() - wTi.translation();
const Point3 w_measurement_i =
wTi.rotation().rotate(calibrated_measurements[i]);
const Point3 w_measurement_j =
wTj.rotation().rotate(calibrated_measurements[j]);
const double q_i = w_measurement_i.cross(w_measurement_j).norm() /
(measurement_sigma * d_ij.cross(w_measurement_j).norm());
const Matrix23 coefficient_mat =
q_i * skewSymmetric(calibrated_measurements[i]).topLeftCorner(2, 3) *
wTi.rotation().matrix().transpose();
A.block<2, 4>(2 * i, 0) << coefficient_mat, -coefficient_mat * wTi.translation();
// build system of equations
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
}
const double rank_tol = 1e-6;
int rank;
double error;
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3)
throw(TriangulationUnderconstrainedException());
if (rank < 3) throw(TriangulationUnderconstrainedException());
return v;
}
Point3 triangulateDLT(const std::vector<Pose3>& poses,
const Point3Vector& homogenousMeasurements,
double rank_tol) {
// number of cameras
size_t m = poses.size();
// Allocate DLT matrix
Matrix A = Matrix::Zero(m * 2, 4);
for (size_t i = 0; i < m; i++) {
size_t row = i * 2;
const Matrix34 projection = poses[i].matrix().block(0, 0, 3, 4);
const Point3& p = homogenousMeasurements[i]; // to get access to x,y,z of
// the bearing vector
// build system of equations
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
}
int rank;
double error;
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3) throw(TriangulationUnderconstrainedException());
return Point3(v.head<3>() / v[3]);
}
Vector3 triangulateLOSTHomogeneousLS(
const std::vector<Pose3>& poses,
const std::vector<Point3>& calibrated_measurements,
const double measurement_sigma) {
size_t m = calibrated_measurements.size();
Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise) {
size_t m = calibratedMeasurements.size();
assert(m == poses.size());
// Construct the system matrices.
@ -121,68 +135,41 @@ Vector3 triangulateLOSTHomogeneousLS(
const Point3 d_ij = wTj.translation() - wTi.translation();
const Point3 w_measurement_i = wTi.rotation().rotate(calibrated_measurements[i]);
const Point3 w_measurement_j = wTj.rotation().rotate(calibrated_measurements[j]);
const Point3 w_measurement_i =
wTi.rotation().rotate(calibratedMeasurements[i]);
const Point3 w_measurement_j =
wTj.rotation().rotate(calibratedMeasurements[j]);
const double q_i = w_measurement_i.cross(w_measurement_j).norm() /
(measurement_sigma * d_ij.cross(w_measurement_j).norm());
double q_i =
w_measurement_i.cross(w_measurement_j).norm() /
(measurementNoise->sigma() * d_ij.cross(w_measurement_j).norm());
const Matrix23 coefficient_mat =
q_i * skewSymmetric(calibrated_measurements[i]).topLeftCorner(2, 3) *
q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) *
wTi.rotation().matrix().transpose();
A.block<2, 3>(2*i, 0) = coefficient_mat;
b.block<2, 1>(2*i, 0) = coefficient_mat * wTi.translation();
A.block<2, 3>(2 * i, 0) << coefficient_mat;
b.block<2, 1>(2 * i, 0) << coefficient_mat * wTi.translation();
}
return A.colPivHouseholderQr().solve(b);
}
Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol) {
// number of cameras
size_t m = projection_matrices.size();
// Allocate DLT matrix
Matrix A = Matrix::Zero(m * 2, 4);
for (size_t i = 0; i < m; i++) {
size_t row = i * 2;
const Matrix34& projection = projection_matrices.at(i);
const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
// build system of equations
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
}
int rank;
double error;
Vector v;
boost::tie(rank, error, v) = DLT(A, rank_tol);
if (rank < 3)
throw(TriangulationUnderconstrainedException());
return v;
}
Point3 triangulateDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const Point2Vector& measurements, double rank_tol) {
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
rank_tol);
Vector4 v =
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
// Create 3D point from homogeneous coordinates
return Point3(v.head<3>() / v[3]);
}
Point3 triangulateDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol) {
// contrary to the previous triangulateDLT, this overload takes Unit3 inputs
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
rank_tol);
Vector4 v =
triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
// Create 3D point from homogeneous coordinates
return Point3(v.head<3>() / v[3]);
}
@ -215,4 +202,4 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
return result.at<Point3>(landmarkKey);
}
} // \namespace gtsam
} // namespace gtsam

View File

@ -62,24 +62,6 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const Point2Vector& measurements, double rank_tol = 1e-9);
/**
* @brief
*
* @param projection_matrices
* @param measurements
* @param rank_tol
* @return GTSAM_EXPORT
*/
GTSAM_EXPORT Vector3
triangulateLOSTHomogeneous(const std::vector<Pose3>& poses,
const std::vector<Point3>& calibrated_measurements,
const double measurement_sigma);
GTSAM_EXPORT Vector3
triangulateLOSTHomogeneousLS(const std::vector<Pose3>& poses,
const std::vector<Point3>& calibrated_measurements,
const double measurement_sigma);
/**
* Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors
* (contrary to pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman)
@ -112,6 +94,20 @@ GTSAM_EXPORT Point3 triangulateDLT(
const std::vector<Unit3>& measurements,
double rank_tol = 1e-9);
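In this variant each unit-norm measurement p = (x, y, z) contributes the two rows built in triangulation.cpp earlier in this diff, with P^{(k)} denoting the k-th row of the 3x4 projection matrix:
\[
x\, P^{(3)} - z\, P^{(1)} = 0, \qquad y\, P^{(3)} - z\, P^{(2)} = 0,
\]
and the homogeneous point is recovered as the null vector of the stacked system, with rank checked against rank_tol.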
/**
* @brief Triangulation using the LOST (Linear Optimal Sine Triangulation)
* algorithm proposed in https://arxiv.org/pdf/2205.12197.pdf by Sebastien Henry
* and John Christian.
* @param poses camera poses in world frame
* @param calibratedMeasurements calibrated measurements in homogeneous
* coordinates, one per camera, expressed in that camera's frame
* @param measurementNoise isotropic noise model for the measurements
* @return triangulated point in world coordinates
*/
GTSAM_EXPORT Point3 triangulateLOST(const std::vector<Pose3>& poses,
const Point3Vector& calibratedMeasurements,
const SharedIsotropic& measurementNoise);
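A minimal usage sketch of the new declaration; the poses, bearing measurements, and sigma below are illustrative placeholders, not values from this commit:
// Hypothetical example: two poses one unit apart along x, observing the
// same landmark with calibrated (homogeneous, z = 1) measurements.
std::vector<Pose3> poses = {Pose3(), Pose3(Rot3(), Point3(1.0, 0.0, 0.0))};
Point3Vector calibratedMeasurements = {Point3(0.2, 0.1, 1.0),
                                       Point3(-0.3, 0.1, 1.0)};
SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-3);
Point3 landmark =
    triangulateLOST(poses, calibratedMeasurements, measurementNoise);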
/**
* Create a factor graph with projection factors from poses and one calibration
* @param poses Camera poses
@ -320,8 +316,8 @@ template <class CAMERA>
typename CAMERA::MeasurementVector undistortMeasurements(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements) {
const size_t num_meas = cameras.size();
assert(num_meas == measurements.size());
const size_t num_meas = measurements.size();
assert(num_meas == cameras.size());
typename CAMERA::MeasurementVector undistortedMeasurements(num_meas);
for (size_t ii = 0; ii < num_meas; ++ii) {
// Calibrate with cal and uncalibrate with pinhole version of cal so that
@ -349,6 +345,65 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
return measurements;
}
/** Convert pixel measurements in an image to homogeneous measurements in the
* image plane using shared camera intrinsics.
*
* @tparam CALIBRATION Calibration type to use.
* @param cal Calibration with which measurements were taken.
* @param measurements Vector of measurements to undistort.
* @return homogeneous measurements in image plane
*/
template <class CALIBRATION>
inline Point3Vector calibrateMeasurementsShared(
const CALIBRATION& cal, const Point2Vector& measurements) {
Point3Vector calibratedMeasurements;
// Calibrate each pixel measurement and append z = 1 to obtain homogeneous
// coordinates in the image plane.
std::transform(measurements.begin(), measurements.end(),
std::back_inserter(calibratedMeasurements),
[&cal](const Point2& measurement) {
Point3 p;
p << cal.calibrate(measurement), 1.0;
return p;
});
return calibratedMeasurements;
}
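For illustration, converting two pixel measurements with one shared calibration; the Cal3_S2 parameters and pixel values are assumed, not taken from this commit:
// Illustrative fx, fy, s, u0, v0 values.
Cal3_S2 cal(500.0, 500.0, 0.0, 320.0, 240.0);
Point2Vector pixels = {Point2(330.0, 250.0), Point2(300.0, 240.0)};
// Each entry of the result is (x_n, y_n, 1): normalized image-plane
// coordinates with a homogeneous 1 appended.
Point3Vector homogeneous = calibrateMeasurementsShared<Cal3_S2>(cal, pixels);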
/** Convert pixel measurements in an image to homogeneous measurements in the
* image plane using each measurement's camera intrinsics.
*
* @tparam CAMERA Camera type to use.
* @param cameras Cameras corresponding to each measurement.
* @param measurements Vector of measurements to undistort.
* @return homogeneous measurements in image plane
*/
template <class CAMERA>
inline Point3Vector calibrateMeasurements(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements) {
const size_t num_meas = measurements.size();
assert(num_meas == cameras.size());
Point3Vector calibratedMeasurements(num_meas);
for (size_t ii = 0; ii < num_meas; ++ii) {
calibratedMeasurements[ii]
<< cameras[ii].calibration().calibrate(measurements[ii]),
1.0;
}
return calibratedMeasurements;
}
/** Specialization for SphericalCamera: measurements are already calibrated bearings, so just convert each Unit3 to Point3. */
template <class CAMERA = SphericalCamera>
inline Point3Vector calibrateMeasurements(
const CameraSet<SphericalCamera>& cameras,
const SphericalCamera::MeasurementVector& measurements) {
Point3Vector calibratedMeasurements(measurements.size());
for (size_t ii = 0; ii < measurements.size(); ++ii) {
calibratedMeasurements[ii] << measurements[ii].point3();
}
return calibratedMeasurements;
}
/**
* Function to triangulate 3D landmark point from an arbitrary number
* of poses (at least 2) using the DLT. The function checks that the
@ -359,6 +414,7 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
* @param measurements A vector of camera measurements
* @param rank_tol rank tolerance, default 1e-9
* @param optimize Flag to turn on nonlinear refinement of triangulation
* @param use_lost_triangulation whether to use the LOST algorithm instead of DLT
* @return Returns a Point3
*/
template<class CALIBRATION>
@ -366,12 +422,27 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
boost::shared_ptr<CALIBRATION> sharedCal,
const Point2Vector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
const SharedNoiseModel& model = nullptr,
const bool use_lost_triangulation = true) {
assert(poses.size() == measurements.size());
if (poses.size() < 2)
throw(TriangulationUnderconstrainedException());
// Triangulate linearly
Point3 point;
if (use_lost_triangulation) {
// Reduce the input noise model to an isotropic noise model using the mean
// of its sigmas.
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
SharedIsotropic measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);
// calibrate the measurements to obtain homogeneous coordinates in the image plane.
auto calibratedMeasurements =
calibrateMeasurementsShared<CALIBRATION>(*sharedCal, measurements);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
@ -379,9 +450,8 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
auto undistortedMeasurements =
undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
// Triangulate linearly
Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
}
// Then refine using non-linear optimization
if (optimize)
@ -400,45 +470,6 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
return point;
}
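A sketch of calling the updated overload with the LOST path enabled; the calibration, poses, and pixel values are placeholders:
auto sharedCal = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
std::vector<Pose3> poses = {Pose3(), Pose3(Rot3(), Point3(1.0, 0.0, 0.0))};
Point2Vector measurements = {Point2(340.0, 250.0), Point2(300.0, 245.0)};
Point3 point = triangulatePoint3<Cal3_S2>(
    poses, sharedCal, measurements, /*rank_tol=*/1e-9,
    /*optimize=*/false, /*model=*/nullptr,
    /*use_lost_triangulation=*/true);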
template <class CALIBRATION>
Point3 triangulateLOSTPoint3(
const CameraSet<PinholeCamera<CALIBRATION>>& cameras,
const Point2Vector& measurements,
const double measurement_sigma = 1e-2, bool use_dlt = false) {
const size_t num_cameras = cameras.size();
assert(measurements.size() == num_cameras);
if (num_cameras < 2) throw(TriangulationUnderconstrainedException());
// Convert measurements to image plane coordinates.
std::vector<Point3> calibrated_measurements;
calibrated_measurements.reserve(measurements.size());
for (int i = 0; i < measurements.size(); ++i) {
Point2 p = cameras[i].calibration().calibrate(measurements[i]);
calibrated_measurements.emplace_back(p.x(), p.y(), 1.0);
}
std::vector<Pose3> poses;
poses.reserve(cameras.size());
for (const auto& camera : cameras) poses.push_back(camera.pose());
Point3 point = use_dlt
? triangulateLOSTHomogeneous(
poses, calibrated_measurements, measurement_sigma)
: triangulateLOSTHomogeneousLS(
poses, calibrated_measurements, measurement_sigma);
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras
for (const auto& camera : cameras) {
const Point3& p_local = camera.pose().transformTo(point);
if (p_local.z() <= 0) throw(TriangulationCheiralityException());
}
#endif
return point;
}
/**
* Function to triangulate 3D landmark point from an arbitrary number
* of poses (at least 2) using the DLT. This function is similar to the one
@ -449,6 +480,7 @@ Point3 triangulateLOSTPoint3(
* @param measurements A vector of camera measurements
* @param rank_tol rank tolerance, default 1e-9
* @param optimize Flag to turn on nonlinear refinement of triangulation
* @param use_lost_triangulation whether to use the LOST algorithm instead of DLT
* @return Returns a Point3
*/
template<class CAMERA>
@ -456,7 +488,8 @@ Point3 triangulatePoint3(
const CameraSet<CAMERA>& cameras,
const typename CAMERA::MeasurementVector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
const SharedNoiseModel& model = nullptr,
const bool use_lost_triangulation = false) {
size_t m = cameras.size();
assert(measurements.size() == m);
@ -464,6 +497,27 @@ Point3 triangulatePoint3(
if (m < 2)
throw(TriangulationUnderconstrainedException());
// Triangulate linearly
Point3 point;
if (use_lost_triangulation) {
// Reduce the input noise model to an isotropic noise model using the mean
// of its sigmas.
const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
SharedIsotropic measurementNoise =
noiseModel::Isotropic::Sigma(2, measurementSigma);
// construct poses from cameras.
std::vector<Pose3> poses;
poses.reserve(cameras.size());
for (const auto& camera : cameras) poses.push_back(camera.pose());
// calibrate the measurements to obtain homogeneous coordinates in the
// image plane.
auto calibratedMeasurements =
calibrateMeasurements<CAMERA>(cameras, measurements);
point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
} else {
// construct projection matrices from poses & calibration
auto projection_matrices = projectionMatricesFromCameras(cameras);
@ -471,12 +525,13 @@ Point3 triangulatePoint3(
auto undistortedMeasurements =
undistortMeasurements<CAMERA>(cameras, measurements);
Point3 point =
triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
}
// Then refine using non-linear optimization
if (optimize)
if (optimize) {
point = triangulateNonlinear<CAMERA>(cameras, measurements, point, model);
}
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
// verify that the triangulated point lies in front of all cameras
@ -496,9 +551,10 @@ Point3 triangulatePoint3(
const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
const Point2Vector& measurements, double rank_tol = 1e-9,
bool optimize = false,
const SharedNoiseModel& model = nullptr) {
const SharedNoiseModel& model = nullptr,
const bool use_lost_triangulation = false) {
return triangulatePoint3<PinholeCamera<CALIBRATION> > //
(cameras, measurements, rank_tol, optimize, model);
(cameras, measurements, rank_tol, optimize, model, use_lost_triangulation);
}
struct GTSAM_EXPORT TriangulationParameters {