From 63e0446f0b04fe23e1224b19e9f819c0f9c9a249 Mon Sep 17 00:00:00 2001
From: Akshay Krishnan
Date: Sun, 10 Jul 2022 16:49:15 -0700
Subject: [PATCH] update triangulation LOST API

---
 gtsam/geometry/Point3.h                    |   1 +
 gtsam/geometry/tests/testTriangulation.cpp |  63 +++---
 gtsam/geometry/triangulation.cpp           | 159 +++++++--------
 gtsam/geometry/triangulation.h             | 218 +++++++++++++--------
 4 files changed, 247 insertions(+), 194 deletions(-)

diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h
index 835c53d7c..b1ac6195b 100644
--- a/gtsam/geometry/Point3.h
+++ b/gtsam/geometry/Point3.h
@@ -33,6 +33,7 @@ namespace gtsam {
 /// As of GTSAM 4, in order to make GTSAM more lean,
 /// it is now possible to just typedef Point3 to Vector3
 typedef Vector3 Point3;
+typedef std::vector<Point3, Eigen::aligned_allocator<Point3> > Point3Vector;
 
 // Convenience typedef
 using Point3Pair = std::pair<Point3, Point3>;
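The Point3Vector typedef added above is what the rest of this patch uses to pass homogeneous calibrated measurements around; the Eigen::aligned_allocator follows GTSAM's convention for storing Eigen types in STL containers. A minimal sketch of constructing one, assuming this patch is applied (values are illustrative, not from the patch):

#include <gtsam/geometry/Point3.h>

using namespace gtsam;

int main() {
  // Homogeneous image-plane measurements (x, y, 1); values are illustrative.
  Point3Vector calibratedMeasurements;
  calibratedMeasurements.emplace_back(0.1, -0.2, 1.0);
  calibratedMeasurements.emplace_back(0.05, 0.3, 1.0);
  return calibratedMeasurements.size() == 2 ? 0 : 1;
}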
diff --git a/gtsam/geometry/tests/testTriangulation.cpp b/gtsam/geometry/tests/testTriangulation.cpp
index 0b885d434..731558327 100644
--- a/gtsam/geometry/tests/testTriangulation.cpp
+++ b/gtsam/geometry/tests/testTriangulation.cpp
@@ -96,23 +96,33 @@ TEST(triangulation, twoPoses) {
   EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
 }
 
-TEST(triangulation, twoCamerasLOST) {
-  std::vector<PinholeCamera<Cal3_S2>> cameras = {camera1, camera2};
-  std::vector<Point2> measurements = {z1, z2};
+TEST(triangulation, twoCamerasUsingLOST) {
+  CameraSet<PinholeCamera<Cal3_S2>> cameras;
+  cameras.push_back(camera1);
+  cameras.push_back(camera2);
+
+  Point2Vector measurements = {z1, z2};
+  SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-4);
+  double rank_tol = 1e-9;
 
   // 1. Test simple triangulation, perfect in no noise situation
-  Point3 actual1 =  //
-      triangulateLOSTPoint3(cameras, measurements);
-  EXPECT(assert_equal(landmark, actual1, 1e-12));
+  boost::optional<Point3> actual1 =  //
+      triangulatePoint3<PinholeCamera<Cal3_S2>>(
+          cameras, measurements, rank_tol,
+          /*optimize=*/false, measurementNoise,
+          /*use_lost_triangulation=*/true);
+  EXPECT(assert_equal(landmark, *actual1, 1e-12));
 
   // 3. Add some noise and try again: result should be ~ (4.995,
   // 0.499167, 1.19814)
   measurements[0] += Point2(0.1, 0.5);
   measurements[1] += Point2(-0.2, 0.3);
-  const double measurement_sigma = 1e-3;
-  Point3 actual2 =  //
-      triangulateLOSTPoint3(cameras, measurements, measurement_sigma);
-  EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), actual2, 1e-4));
+  boost::optional<Point3> actual2 =  //
+      triangulatePoint3<PinholeCamera<Cal3_S2>>(
+          cameras, measurements, rank_tol,
+          /*optimize=*/false, measurementNoise,
+          /*use_lost_triangulation=*/true);
+  EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual2, 1e-4));
 }
 
 TEST(triangulation, twoCamerasLOSTvsDLT) {
@@ -121,33 +131,32 @@ TEST(triangulation, twoCamerasLOSTvsDLT) {
   Cal3_S2 identity_K;
   Pose3 pose_1;
   Pose3 pose_2(Rot3(), Point3(5., 0., -5.));
-  PinholeCamera<Cal3_S2> camera_1(pose_1, identity_K);
-  PinholeCamera<Cal3_S2> camera_2(pose_2, identity_K);
+  CameraSet<PinholeCamera<Cal3_S2>> cameras;
+  cameras.emplace_back(pose_1, identity_K);
+  cameras.emplace_back(pose_2, identity_K);
 
   Point3 gt_point(0, 0, 1);
-  Point2 x1 = camera_1.project(gt_point);
-  Point2 x2 = camera_2.project(gt_point);
+  Point2 x1_noisy = cameras[0].project(gt_point) + Point2(0.00817, 0.00977);
+  Point2 x2_noisy = cameras[1].project(gt_point) + Point2(-0.00610, 0.01969);
+  Point2Vector measurements = {x1_noisy, x2_noisy};
 
-  Point2 x1_noisy = x1 + Point2(0.00817, 0.00977);
-  Point2 x2_noisy = x2 + Point2(-0.00610, 0.01969);
+  double rank_tol = 1e-9;
+  SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2);
 
-  const double measurement_sigma = 1e-2;
-  Point3 estimate_lost = triangulateLOSTPoint3(
-      {camera_1, camera_2}, {x1_noisy, x2_noisy}, measurement_sigma);
+  boost::optional<Point3> estimate_lost =  //
+      triangulatePoint3<PinholeCamera<Cal3_S2>>(
+          cameras, measurements, rank_tol,
+          /*optimize=*/false, measurementNoise,
+          /*use_lost_triangulation=*/true);
 
   // These values are from a MATLAB implementation.
-  EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), estimate_lost, 1e-3));
-
-  double rank_tol = 1e-9;
+  EXPECT(assert_equal(Point3(0.007, 0.011, 0.945), *estimate_lost, 1e-3));
 
-  Pose3Vector poses = {pose_1, pose_2};
-  Point2Vector points = {x1_noisy, x2_noisy};
-  boost::shared_ptr<Cal3_S2> cal = boost::make_shared<Cal3_S2>(identity_K);
   boost::optional<Point3> estimate_dlt =
-      triangulatePoint3(poses, cal, points, rank_tol, false);
+      triangulatePoint3(cameras, measurements, rank_tol, false);
 
   // The LOST estimate should have a smaller error.
-  EXPECT((gt_point - estimate_lost).norm() <=
+  EXPECT((gt_point - *estimate_lost).norm() <=
          (gt_point - *estimate_dlt).norm());
 }
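The updated tests reach LOST through the generic triangulatePoint3 overload and a use_lost_triangulation flag instead of the removed triangulateLOSTPoint3 entry point. A minimal sketch of that call pattern, assuming this patch is applied (the cameras, landmark, and sigma below are illustrative, not the test fixtures):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/triangulation.h>
#include <gtsam/linear/NoiseModel.h>

using namespace gtsam;

int main() {
  // Two cameras with identity intrinsics observing a point at (0, 0, 1).
  CameraSet<PinholeCamera<Cal3_S2>> cameras;
  cameras.emplace_back(Pose3(), Cal3_S2());
  cameras.emplace_back(Pose3(Rot3(), Point3(5., 0., -5.)), Cal3_S2());

  Point3 landmark(0., 0., 1.);
  Point2Vector measurements = {cameras[0].project(landmark),
                               cameras[1].project(landmark)};

  // Pixel noise model; the implementation reduces it to an isotropic sigma.
  SharedNoiseModel measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-2);

  // use_lost_triangulation=true selects the LOST path instead of DLT.
  Point3 estimate = triangulatePoint3<PinholeCamera<Cal3_S2>>(
      cameras, measurements, /*rank_tol=*/1e-9,
      /*optimize=*/false, measurementNoise,
      /*use_lost_triangulation=*/true);

  return estimate.isApprox(landmark, 1e-6) ? 0 : 1;
}

Passing a noise model is optional; per the implementation below, a missing model falls back to an isotropic sigma of 1e-4.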
diff --git a/gtsam/geometry/triangulation.cpp b/gtsam/geometry/triangulation.cpp
index 9d8325d68..c2c798d8b 100644
--- a/gtsam/geometry/triangulation.cpp
+++ b/gtsam/geometry/triangulation.cpp
@@ -24,9 +24,9 @@ namespace gtsam {
 
 Vector4 triangulateHomogeneousDLT(
-    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
+    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
+        projection_matrices,
     const Point2Vector& measurements, double rank_tol) {
-
   // number of cameras
   size_t m = projection_matrices.size();
 
@@ -52,61 +52,75 @@ Vector4 triangulateHomogeneousDLT(
   Vector v;
   boost::tie(rank, error, v) = DLT(A, rank_tol);
 
-  if (rank < 3)
-    throw(TriangulationUnderconstrainedException());
+  if (rank < 3) throw(TriangulationUnderconstrainedException());
 
   return v;
 }
 
-Vector3 triangulateLOSTHomogeneous(
-    const std::vector<Pose3>& poses,
-    const std::vector<Point3>& calibrated_measurements,
-    const double measurement_sigma) {
-  size_t m = calibrated_measurements.size();
-  assert(m == poses.size());
+Vector4 triangulateHomogeneousDLT(
+    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
+        projection_matrices,
+    const std::vector<Unit3>& measurements, double rank_tol) {
+  // number of cameras
+  size_t m = projection_matrices.size();
 
-  // Construct the system matrices.
+  // Allocate DLT matrix
   Matrix A = Matrix::Zero(m * 2, 4);
 
   for (size_t i = 0; i < m; i++) {
-    const Pose3& wTi = poses[i];
-    // TODO(akshay-krishnan): are there better ways to select j?
-    const int j = (i + 1) % m;
-    const Pose3& wTj = poses[j];
+    size_t row = i * 2;
+    const Matrix34& projection = projection_matrices.at(i);
+    const Point3& p =
+        measurements.at(i)
+            .point3();  // to get access to x,y,z of the bearing vector
 
-    const Point3 d_ij = wTj.translation() - wTi.translation();
-
-    const Point3 w_measurement_i =
-        wTi.rotation().rotate(calibrated_measurements[i]);
-    const Point3 w_measurement_j =
-        wTj.rotation().rotate(calibrated_measurements[j]);
-
-    const double q_i = w_measurement_i.cross(w_measurement_j).norm() /
-                       (measurement_sigma * d_ij.cross(w_measurement_j).norm());
-    const Matrix23 coefficient_mat =
-        q_i * skewSymmetric(calibrated_measurements[i]).topLeftCorner(2, 3) *
-        wTi.rotation().matrix().transpose();
-
-    A.block<2, 4>(2 * i, 0) << coefficient_mat, -coefficient_mat * wTi.translation();
+    // build system of equations
+    A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
+    A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
   }
-
-  const double rank_tol = 1e-6;
   int rank;
   double error;
   Vector v;
   boost::tie(rank, error, v) = DLT(A, rank_tol);
 
-  if (rank < 3)
-    throw(TriangulationUnderconstrainedException());
+  if (rank < 3) throw(TriangulationUnderconstrainedException());
+
+  return v;
+}
+
+Point3 triangulateDLT(const std::vector<Pose3>& poses,
+                      const Point3Vector& homogenousMeasurements,
+                      double rank_tol) {
+  // number of cameras
+  size_t m = poses.size();
+
+  // Allocate DLT matrix
+  Matrix A = Matrix::Zero(m * 2, 4);
+
+  for (size_t i = 0; i < m; i++) {
+    size_t row = i * 2;
+    const Matrix34 projection = poses[i].matrix().block(0, 0, 3, 4);
+    const Point3& p = homogenousMeasurements[i];  // to get access to x,y,z of
+                                                  // the bearing vector
+
+    // build system of equations
+    A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
+    A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
+  }
+  int rank;
+  double error;
+  Vector v;
+  boost::tie(rank, error, v) = DLT(A, rank_tol);
+
+  if (rank < 3) throw(TriangulationUnderconstrainedException());
 
   return Point3(v.head<3>() / v[3]);
 }
 
-Vector3 triangulateLOSTHomogeneousLS(
-    const std::vector<Pose3>& poses,
-    const std::vector<Point3>& calibrated_measurements,
-    const double measurement_sigma) {
-  size_t m = calibrated_measurements.size();
+Point3 triangulateLOST(const std::vector<Pose3>& poses,
+                       const Point3Vector& calibratedMeasurements,
+                       const SharedIsotropic& measurementNoise) {
+  size_t m = calibratedMeasurements.size();
   assert(m == poses.size());
 
   // Construct the system matrices.
@@ -121,70 +135,43 @@ Vector3 triangulateLOSTHomogeneousLS(
 
     const Point3 d_ij = wTj.translation() - wTi.translation();
 
-    const Point3 w_measurement_i = wTi.rotation().rotate(calibrated_measurements[i]);
-    const Point3 w_measurement_j = wTj.rotation().rotate(calibrated_measurements[j]);
+    const Point3 w_measurement_i =
+        wTi.rotation().rotate(calibratedMeasurements[i]);
+    const Point3 w_measurement_j =
+        wTj.rotation().rotate(calibratedMeasurements[j]);
 
-    const double q_i = w_measurement_i.cross(w_measurement_j).norm() /
-                       (measurement_sigma * d_ij.cross(w_measurement_j).norm());
+    double q_i =
+        w_measurement_i.cross(w_measurement_j).norm() /
+        (measurementNoise->sigma() * d_ij.cross(w_measurement_j).norm());
     const Matrix23 coefficient_mat =
-        q_i * skewSymmetric(calibrated_measurements[i]).topLeftCorner(2, 3) *
+        q_i * skewSymmetric(calibratedMeasurements[i]).topLeftCorner(2, 3) *
         wTi.rotation().matrix().transpose();
 
-    A.block<2, 3>(2*i, 0) = coefficient_mat;
-    b.block<2, 1>(2*i, 0) = coefficient_mat * wTi.translation();
+    A.block<2, 3>(2 * i, 0) << coefficient_mat;
+    b.block<2, 1>(2 * i, 0) << coefficient_mat * wTi.translation();
   }
 
   return A.colPivHouseholderQr().solve(b);
 }
 
-Vector4 triangulateHomogeneousDLT(
-    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
-    const std::vector<Unit3>& measurements, double rank_tol) {
-
-  // number of cameras
-  size_t m = projection_matrices.size();
-
-  // Allocate DLT matrix
-  Matrix A = Matrix::Zero(m * 2, 4);
-
-  for (size_t i = 0; i < m; i++) {
-    size_t row = i * 2;
-    const Matrix34& projection = projection_matrices.at(i);
-    const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
-
-    // build system of equations
-    A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
-    A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
-  }
-  int rank;
-  double error;
-  Vector v;
-  boost::tie(rank, error, v) = DLT(A, rank_tol);
-
-  if (rank < 3)
-    throw(TriangulationUnderconstrainedException());
-
-  return v;
-}
-
 Point3 triangulateDLT(
-    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
+    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
+        projection_matrices,
     const Point2Vector& measurements, double rank_tol) {
-
-  Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
-                                        rank_tol);
+  Vector4 v =
+      triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
   // Create 3D point from homogeneous coordinates
   return Point3(v.head<3>() / v[3]);
 }
 
 Point3 triangulateDLT(
-    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
+    const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>&
+        projection_matrices,
     const std::vector<Unit3>& measurements, double rank_tol) {
-  // contrary to previous triangulateDLT, this is now taking Unit3 inputs
-  Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
-                                        rank_tol);
-  // Create 3D point from homogeneous coordinates
-  return Point3(v.head<3>() / v[3]);
+  Vector4 v =
+      triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
+  // Create 3D point from homogeneous coordinates
+  return Point3(v.head<3>() / v[3]);
 }
 
 ///
@@ -215,4 +202,4 @@ Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
   return result.at<Point3>(landmarkKey);
 }
 
-} // \namespace gtsam
+} // namespace gtsam
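triangulateLOST above replaces the triangulateLOSTHomogeneous / triangulateLOSTHomogeneousLS pair: it stacks one LOST-weighted 2x3 block per measurement into A x = b and solves the system with QR-based least squares. A minimal sketch of calling it directly, assuming this patch is applied (the poses and measurement values are illustrative):

#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/triangulation.h>
#include <gtsam/linear/NoiseModel.h>

#include <vector>

using namespace gtsam;

int main() {
  // Two camera poses in the world frame.
  std::vector<Pose3> poses = {Pose3(), Pose3(Rot3(), Point3(5., 0., -5.))};

  // Calibrated measurements of a point at (0, 0, 1), given as homogeneous
  // (x, y, 1) image-plane coordinates; the second camera sees it at (-5/6, 0).
  Point3Vector calibratedMeasurements = {Point3(0.0, 0.0, 1.0),
                                         Point3(-5.0 / 6.0, 0.0, 1.0)};

  // LOST expects an isotropic noise model; its sigma enters the row weights.
  SharedIsotropic measurementNoise = noiseModel::Isotropic::Sigma(2, 1e-3);

  Point3 estimate =
      triangulateLOST(poses, calibratedMeasurements, measurementNoise);
  return estimate.isApprox(Point3(0., 0., 1.), 1e-6) ? 0 : 1;
}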
diff --git a/gtsam/geometry/triangulation.h b/gtsam/geometry/triangulation.h
index 5bea4d93c..9e59a92cb 100644
--- a/gtsam/geometry/triangulation.h
+++ b/gtsam/geometry/triangulation.h
@@ -62,24 +62,6 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
     const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
     const Point2Vector& measurements, double rank_tol = 1e-9);
 
-/**
- * @brief
- *
- * @param projection_matrices
- * @param measurements
- * @param rank_tol
- * @return GTSAM_EXPORT
- */
-GTSAM_EXPORT Vector3
-triangulateLOSTHomogeneous(const std::vector<Pose3>& poses,
-                           const std::vector<Point3>& calibrated_measurements,
-                           const double measurement_sigma);
-
-GTSAM_EXPORT Vector3
-triangulateLOSTHomogeneousLS(const std::vector<Pose3>& poses,
-                             const std::vector<Point3>& calibrated_measurements,
-                             const double measurement_sigma);
-
 /**
  * Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors
  * (contrarily to pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman)
@@ -112,6 +94,20 @@ GTSAM_EXPORT Point3 triangulateDLT(
     const std::vector<Unit3>& measurements,
     double rank_tol = 1e-9);
 
+/**
+ * @brief Triangulation using the LOST (Linear Optimal Sine Triangulation)
+ * algorithm proposed in https://arxiv.org/pdf/2205.12197.pdf by Sebastien Henry
+ * and John Christian.
+ * @param poses camera poses in world frame
+ * @param calibratedMeasurements measurements in homogeneous coordinates in each
+ * camera pose
+ * @param measurementNoise isotropic noise model for the measurements
+ * @return triangulated point in world coordinates
+ */
+GTSAM_EXPORT Point3 triangulateLOST(const std::vector<Pose3>& poses,
+                                    const Point3Vector& calibratedMeasurements,
+                                    const SharedIsotropic& measurementNoise);
+
 /**
  * Create a factor graph with projection factors from poses and one calibration
  * @param poses Camera poses
@@ -320,8 +316,8 @@ template <class CAMERA>
 typename CAMERA::MeasurementVector undistortMeasurements(
     const CameraSet<CAMERA>& cameras,
     const typename CAMERA::MeasurementVector& measurements) {
-  const size_t num_meas = cameras.size();
-  assert(num_meas == measurements.size());
+  const size_t num_meas = measurements.size();
+  assert(num_meas == cameras.size());
   typename CAMERA::MeasurementVector undistortedMeasurements(num_meas);
   for (size_t ii = 0; ii < num_meas; ++ii) {
     // Calibrate with cal and uncalibrate with pinhole version of cal so that
@@ -349,6 +345,65 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
   return measurements;
 }
 
+/** Convert pixel measurements in image to homogeneous measurements in the image
+ * plane using shared camera intrinsics.
+ *
+ * @tparam CALIBRATION Calibration type to use.
+ * @param cal Calibration with which measurements were taken.
+ * @param measurements Vector of measurements to undistort.
+ * @return homogeneous measurements in image plane
+ */
+template <class CALIBRATION>
+inline Point3Vector calibrateMeasurementsShared(
+    const CALIBRATION& cal, const Point2Vector& measurements) {
+  Point3Vector calibratedMeasurements;
+  // Calibrate with cal and uncalibrate with pinhole version of cal so that
+  // measurements are undistorted.
+  std::transform(measurements.begin(), measurements.end(),
+                 std::back_inserter(calibratedMeasurements),
+                 [&cal](const Point2& measurement) {
+                   Point3 p;
+                   p << cal.calibrate(measurement), 1.0;
+                   return p;
+                 });
+  return calibratedMeasurements;
+}
+
+/** Convert pixel measurements in image to homogeneous measurements in the image
+ * plane using camera intrinsics of each measurement.
+ *
+ * @tparam CAMERA Camera type to use.
+ * @param cameras Cameras corresponding to each measurement.
+ * @param measurements Vector of measurements to undistort.
+ * @return homogeneous measurements in image plane
+ */
+template <class CAMERA>
+inline Point3Vector calibrateMeasurements(
+    const CameraSet<CAMERA>& cameras,
+    const typename CAMERA::MeasurementVector& measurements) {
+  const size_t num_meas = measurements.size();
+  assert(num_meas == cameras.size());
+  Point3Vector calibratedMeasurements(num_meas);
+  for (size_t ii = 0; ii < num_meas; ++ii) {
+    calibratedMeasurements[ii]
+        << cameras[ii].calibration().calibrate(measurements[ii]),
+        1.0;
+  }
+  return calibratedMeasurements;
+}
+
+/** Specialize for SphericalCamera to do nothing. */
+template <class CAMERA = SphericalCamera>
+inline Point3Vector calibrateMeasurements(
+    const CameraSet<SphericalCamera>& cameras,
+    const SphericalCamera::MeasurementVector& measurements) {
+  Point3Vector calibratedMeasurements(measurements.size());
+  for (size_t ii = 0; ii < measurements.size(); ++ii) {
+    calibratedMeasurements[ii] << measurements[ii].point3();
+  }
+  return calibratedMeasurements;
+}
+
 /**
  * Function to triangulate 3D landmark point from an arbitrary number
  * of poses (at least 2) using the DLT. The function checks that the
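The calibrateMeasurementsShared / calibrateMeasurements helpers added above produce the homogeneous (x, y, 1) image-plane points that the LOST branch of triangulatePoint3 hands to triangulateLOST. A small sketch of the shared-intrinsics version, assuming this patch is applied (the calibration and pixel values are illustrative):

#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/triangulation.h>

using namespace gtsam;

int main() {
  // fx, fy, skew, principal point (illustrative values).
  Cal3_S2 K(500.0, 500.0, 0.0, 320.0, 240.0);
  Point2Vector pixels = {Point2(320.0, 240.0), Point2(420.0, 240.0)};

  // Each output is (K.calibrate(pixel), 1): here (0, 0, 1) and (0.2, 0, 1).
  Point3Vector homogeneous = calibrateMeasurementsShared<Cal3_S2>(K, pixels);

  return homogeneous.size() == pixels.size() ? 0 : 1;
}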
@@ -359,6 +414,7 @@ inline SphericalCamera::MeasurementVector undistortMeasurements(
  * @param measurements A vector of camera measurements
  * @param rank_tol rank tolerance, default 1e-9
  * @param optimize Flag to turn on nonlinear refinement of triangulation
+ * @param use_lost_triangulation whether to use the LOST algorithm instead of DLT
  * @return Returns a Point3
  */
 template <class CALIBRATION>
@@ -366,22 +422,36 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
                          boost::shared_ptr<CALIBRATION> sharedCal,
                          const Point2Vector& measurements,
                          double rank_tol = 1e-9, bool optimize = false,
-                         const SharedNoiseModel& model = nullptr) {
+                         const SharedNoiseModel& model = nullptr,
+                         const bool use_lost_triangulation = true) {
   assert(poses.size() == measurements.size());
   if (poses.size() < 2) throw(TriangulationUnderconstrainedException());
 
-  // construct projection matrices from poses & calibration
-  auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
-
-  // Undistort the measurements, leaving only the pinhole elements in effect.
-  auto undistortedMeasurements =
-      undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
-  // Triangulate linearly
-  Point3 point =
-      triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
+  Point3 point;
+  if(use_lost_triangulation) {
+    // Reduce input noise model to an isotropic noise model using the mean of
+    // the diagonal.
+    const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
+    SharedIsotropic measurementNoise =
+        noiseModel::Isotropic::Sigma(2, measurementSigma);
+    // calibrate the measurements to obtain homogenous coordinates in image plane.
+    auto calibratedMeasurements =
+        calibrateMeasurementsShared<CALIBRATION>(*sharedCal, measurements);
+
+    point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
+  } else {
+    // construct projection matrices from poses & calibration
+    auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
+
+    // Undistort the measurements, leaving only the pinhole elements in effect.
+    auto undistortedMeasurements =
+        undistortMeasurements<CALIBRATION>(*sharedCal, measurements);
+
+    point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
+  }
 
   // Then refine using non-linear optimization
   if (optimize)
@@ -400,45 +470,6 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
   return point;
 }
 
-template <class CALIBRATION>
-Point3 triangulateLOSTPoint3(
-    const CameraSet<PinholeCamera<CALIBRATION>>& cameras,
-    const Point2Vector& measurements,
-    const double measurement_sigma = 1e-2, bool use_dlt = false) {
-  const size_t num_cameras = cameras.size();
-  assert(measurements.size() == num_cameras);
-
-  if (num_cameras < 2) throw(TriangulationUnderconstrainedException());
-
-  // Convert measurements to image plane coordinates.
-  std::vector<Point3> calibrated_measurements;
-  calibrated_measurements.reserve(measurements.size());
-  for (int i = 0; i < measurements.size(); ++i) {
-    Point2 p = cameras[i].calibration().calibrate(measurements[i]);
-    calibrated_measurements.emplace_back(p.x(), p.y(), 1.0);
-  }
-
-  std::vector<Pose3> poses;
-  poses.reserve(cameras.size());
-  for (const auto& camera : cameras) poses.push_back(camera.pose());
-
-  Point3 point = use_dlt
-                     ? triangulateLOSTHomogeneous(
-                           poses, calibrated_measurements, measurement_sigma)
-                     : triangulateLOSTHomogeneousLS(
-                           poses, calibrated_measurements, measurement_sigma);
-
-#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
-  // verify that the triangulated point lies in front of all cameras
-  for (const auto& camera : cameras) {
-    const Point3& p_local = camera.pose().transformTo(point);
-    if (p_local.z() <= 0) throw(TriangulationCheiralityException());
-  }
-#endif
-
-  return point;
-}
-
 /**
  * Function to triangulate 3D landmark point from an arbitrary number
  * of poses (at least 2) using the DLT. This function is similar to the one
  * @param measurements A vector of camera measurements
  * @param rank_tol rank tolerance, default 1e-9
  * @param optimize Flag to turn on nonlinear refinement of triangulation
+ * @param use_lost_triangulation whether to use the LOST algorithm instead of DLT
  * @return Returns a Point3
  */
 template <class CAMERA>
@@ -456,7 +488,8 @@ Point3 triangulatePoint3(
     const CameraSet<CAMERA>& cameras,
     const typename CAMERA::MeasurementVector& measurements,
     double rank_tol = 1e-9, bool optimize = false,
-    const SharedNoiseModel& model = nullptr) {
+    const SharedNoiseModel& model = nullptr,
+    const bool use_lost_triangulation=false) {
   size_t m = cameras.size();
   assert(measurements.size() == m);
 
   if (m < 2) throw(TriangulationUnderconstrainedException());
 
-  // construct projection matrices from poses & calibration
-  auto projection_matrices = projectionMatricesFromCameras(cameras);
+  // Triangulate linearly
+  Point3 point;
+  if(use_lost_triangulation) {
+    // Reduce input noise model to an isotropic noise model using the mean of
+    // the diagonal.
+    const double measurementSigma = model ? model->sigmas().mean() : 1e-4;
+    SharedIsotropic measurementNoise =
+        noiseModel::Isotropic::Sigma(2, measurementSigma);
 
-  // Undistort the measurements, leaving only the pinhole elements in effect.
-  auto undistortedMeasurements =
-      undistortMeasurements<CAMERA>(cameras, measurements);
+    // construct poses from cameras.
+    std::vector<Pose3> poses;
+    poses.reserve(cameras.size());
+    for (const auto& camera : cameras) poses.push_back(camera.pose());
 
-  Point3 point =
-      triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
+    // calibrate the measurements to obtain homogenous coordinates in image
+    // plane.
+    auto calibratedMeasurements =
+        calibrateMeasurements<CAMERA>(cameras, measurements);
 
-  // The n refine using non-linear optimization
-  if (optimize)
+    point = triangulateLOST(poses, calibratedMeasurements, measurementNoise);
+  } else {
+    // construct projection matrices from poses & calibration
+    auto projection_matrices = projectionMatricesFromCameras(cameras);
+
+    // Undistort the measurements, leaving only the pinhole elements in effect.
+    auto undistortedMeasurements =
+        undistortMeasurements<CAMERA>(cameras, measurements);
+
+    point = triangulateDLT(projection_matrices, undistortedMeasurements, rank_tol);
+  }
+
+  // Then refine using non-linear optimization
+  if (optimize) {
     point = triangulateNonlinear<CAMERA>(cameras, measurements, point, model);
+  }
 
 #ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
   // verify that the triangulated point lies in front of all cameras
@@ -496,9 +551,10 @@ Point3 triangulatePoint3(
     const CameraSet<PinholeCamera<CALIBRATION> >& cameras,
     const Point2Vector& measurements, double rank_tol = 1e-9,
     bool optimize = false,
-    const SharedNoiseModel& model = nullptr) {
+    const SharedNoiseModel& model = nullptr,
+    const bool use_lost_triangulation = false) {
   return triangulatePoint3<PinholeCamera<CALIBRATION> >  //
-      (cameras, measurements, rank_tol, optimize, model);
+      (cameras, measurements, rank_tol, optimize, model, use_lost_triangulation);
 }
 
 struct GTSAM_EXPORT TriangulationParameters {