still challenging to parse extrinsics

release/4.3a0
lcarlone 2021-08-25 22:22:53 -04:00
parent 7b399a363a
commit b523623277
2 changed files with 88 additions and 81 deletions

View File

@@ -51,7 +51,6 @@ class SmartProjectionFactorP : public SmartProjectionFactor<CAMERA> {
 private:
   typedef SmartProjectionFactor<CAMERA> Base;
   typedef SmartProjectionFactorP<CAMERA> This;
-  typedef CAMERA Camera;
   typedef typename CAMERA::CalibrationType CALIBRATION;

 protected:
@@ -63,6 +62,8 @@ class SmartProjectionFactorP : public SmartProjectionFactor<CAMERA> {
   std::vector<Pose3> body_P_sensors_;

 public:
+  typedef CAMERA Camera;
+  typedef CameraSet<CAMERA> Cameras;

   /// shorthand for a smart pointer to a factor
   typedef boost::shared_ptr<This> shared_ptr;

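The header change above moves the Camera typedef out of the private section and publishes it alongside a new Cameras typedef (a CameraSet<CAMERA>), presumably so code outside the class can name these types without repeating the template argument. A minimal usage sketch, assuming the factor is instantiated with GTSAM's PinholePose<Cal3_S2> camera and that the relevant headers are included (both assumptions, not part of the diff):

  // Sketch only: the camera type and function name are illustrative.
  typedef gtsam::PinholePose<gtsam::Cal3_S2> Camera;
  typedef gtsam::SmartProjectionFactorP<Camera> SmartFactorP;

  void buildCameraSet() {
    SmartFactorP::Camera cam;       // one calibrated camera (same type as Camera)
    SmartFactorP::Cameras cameras;  // CameraSet<Camera>, one entry per view
    cameras.push_back(cam);
  }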
View File

@@ -182,92 +182,98 @@ TEST( SmartProjectionFactorP, noisy ) {
   sharedKs.push_back(sharedK);
   sharedKs.push_back(sharedK);
-  std::vector<Pose3> body_P_sensors;
-  body_P_sensors.push_back(Pose3::identity());
-  body_P_sensors.push_back(Pose3::identity());
   KeyVector views {x1, x2};
-  factor2->add(measurements, views, sharedKs, body_P_sensors);
+  factor2->add(measurements, views, sharedKs);
   double actualError2 = factor2->error(values);
   DOUBLES_EQUAL(actualError1, actualError2, 1e-7);
 }
-///* *************************************************************************/
-//TEST(SmartProjectionFactorP, smartFactorWithSensorBodyTransform) {
-//  using namespace vanillaPose;
-//
-//  // create arbitrary body_T_sensor (transforms from sensor to body)
-//  Pose3 body_T_sensor = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), Point3(1, 1, 1));
-//
-//  // These are the poses we want to estimate, from camera measurements
-//  const Pose3 sensor_T_body = body_T_sensor.inverse();
-//  Pose3 wTb1 = cam1.pose() * sensor_T_body;
-//  Pose3 wTb2 = cam2.pose() * sensor_T_body;
-//  Pose3 wTb3 = cam3.pose() * sensor_T_body;
-//
-//  // three landmarks ~5 meters infront of camera
-//  Point3 landmark1(5, 0.5, 1.2), landmark2(5, -0.5, 1.2), landmark3(5, 0, 3.0);
-//
-//  Point2Vector measurements_cam1, measurements_cam2, measurements_cam3;
-//
-//  // Project three landmarks into three cameras
-//  projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1);
-//  projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2);
-//  projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3);
-//
-//  // Create smart factors
-//  KeyVector views {x1, x2, x3};
-//
-//  SmartProjectionParams params;
-//  params.setRankTolerance(1.0);
-//  params.setDegeneracyMode(IGNORE_DEGENERACY);
-//  params.setEnableEPI(false);
-//
-//  SmartFactorP smartFactor1(model, sharedK, body_T_sensor, params);
-//  smartFactor1.add(measurements_cam1, views);
-//
-//  SmartFactorP smartFactor2(model, sharedK, body_T_sensor, params);
-//  smartFactor2.add(measurements_cam2, views);
-//
-//  SmartFactorP smartFactor3(model, sharedK, body_T_sensor, params);
-//  smartFactor3.add(measurements_cam3, views);
-//
-//  const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
-//
-//  // Put all factors in factor graph, adding priors
-//  NonlinearFactorGraph graph;
-//  graph.push_back(smartFactor1);
-//  graph.push_back(smartFactor2);
-//  graph.push_back(smartFactor3);
-//  graph.addPrior(x1, wTb1, noisePrior);
-//  graph.addPrior(x2, wTb2, noisePrior);
-//
-//  // Check errors at ground truth poses
-//  Values gtValues;
-//  gtValues.insert(x1, wTb1);
-//  gtValues.insert(x2, wTb2);
-//  gtValues.insert(x3, wTb3);
-//  double actualError = graph.error(gtValues);
-//  double expectedError = 0.0;
-//  DOUBLES_EQUAL(expectedError, actualError, 1e-7)
-//
-//  Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
-//      Point3(0.1, 0.1, 0.1));
-//  Values values;
-//  values.insert(x1, wTb1);
-//  values.insert(x2, wTb2);
-//  // initialize third pose with some noise, we expect it to move back to
-//  // original pose3
-//  values.insert(x3, wTb3 * noise_pose);
-//
-//  LevenbergMarquardtParams lmParams;
-//  Values result;
-//  LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
-//  result = optimizer.optimize();
-//  EXPECT(assert_equal(wTb3, result.at<Pose3>(x3)));
-//}
-//
+/* *************************************************************************/
+TEST(SmartProjectionFactorP, smartFactorWithSensorBodyTransform) {
+  using namespace vanillaPose;
+
+  // create arbitrary body_T_sensor (transforms from sensor to body)
+  Pose3 body_T_sensor = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2), Point3(1, 1, 1));
+
+  // These are the poses we want to estimate, from camera measurements
+  const Pose3 sensor_T_body = body_T_sensor.inverse();
+  Pose3 wTb1 = cam1.pose() * sensor_T_body;
+  Pose3 wTb2 = cam2.pose() * sensor_T_body;
+  Pose3 wTb3 = cam3.pose() * sensor_T_body;
+
+  // three landmarks ~5 meters infront of camera
+  Point3 landmark1(5, 0.5, 1.2), landmark2(5, -0.5, 1.2), landmark3(5, 0, 3.0);
+
+  Point2Vector measurements_cam1, measurements_cam2, measurements_cam3;
+
+  // Project three landmarks into three cameras
+  projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_cam1);
+  projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_cam2);
+  projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_cam3);
+
+  // Create smart factors
+  KeyVector views {x1, x2, x3};
+
+  SmartProjectionParams params;
+  params.setRankTolerance(1.0);
+  params.setDegeneracyMode(IGNORE_DEGENERACY);
+  params.setEnableEPI(false);
+
+  std::vector<boost::shared_ptr<Cal3_S2>> sharedKs;
+  sharedKs.push_back(sharedK);
+  sharedKs.push_back(sharedK);
+  sharedKs.push_back(sharedK);
+
+  std::vector<Pose3> body_T_sensors;
+  body_T_sensors.push_back(body_T_sensor);
+  body_T_sensors.push_back(body_T_sensor);
+  body_T_sensors.push_back(body_T_sensor);
+
+  SmartFactorP smartFactor1(model, params);
+  smartFactor1.add(measurements_cam1, views, sharedKs, body_T_sensors);
+
+  SmartFactorP smartFactor2(model, params);
+  smartFactor2.add(measurements_cam2, views, sharedKs, body_T_sensors);
+
+  SmartFactorP smartFactor3(model, params);
+  smartFactor3.add(measurements_cam3, views, sharedKs, body_T_sensors);
+
+  const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
+
+  // Put all factors in factor graph, adding priors
+  NonlinearFactorGraph graph;
+  graph.push_back(smartFactor1);
+  graph.push_back(smartFactor2);
+  graph.push_back(smartFactor3);
+  graph.addPrior(x1, wTb1, noisePrior);
+  graph.addPrior(x2, wTb2, noisePrior);
+
+  // Check errors at ground truth poses
+  Values gtValues;
+  gtValues.insert(x1, wTb1);
+  gtValues.insert(x2, wTb2);
+  gtValues.insert(x3, wTb3);
+  double actualError = graph.error(gtValues);
+  double expectedError = 0.0;
+  DOUBLES_EQUAL(expectedError, actualError, 1e-7)
+
+  Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
+      Point3(0.1, 0.1, 0.1));
+  Values values;
+  values.insert(x1, wTb1);
+  values.insert(x2, wTb2);
+  // initialize third pose with some noise, we expect it to move back to
+  // original pose3
+  values.insert(x3, wTb3 * noise_pose);
+
+  LevenbergMarquardtParams lmParams;
+  Values result;
+  LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
+  result = optimizer.optimize();
+  EXPECT(assert_equal(wTb3, result.at<Pose3>(x3)));
+}
 ///* *************************************************************************/
 //TEST( SmartProjectionFactorP, 3poses_smart_projection_factor ) {
 //
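Taken together, the two test changes above move SmartProjectionFactorP to a per-measurement interface: the factor is now constructed from only the noise model and parameters, and add() receives one calibration and one body_P_sensor extrinsic per view, while the three-argument add() used in the noisy test presumably falls back to identity extrinsics (exactly what the deleted body_P_sensors vector of Pose3::identity() supplied explicitly). A condensed sketch of that calling pattern, reusing the test's fixture names (model, sharedK, body_T_sensor, views, measurements_cam1) rather than quoting the test verbatim:

  // Sketch of the revised API; names come from the test fixture above.
  SmartProjectionParams params;
  SmartFactorP factor(model, params);  // calibration and extrinsics no longer passed to the constructor

  // One calibration and one body_P_sensor per view.
  std::vector<boost::shared_ptr<Cal3_S2>> sharedKs(views.size(), sharedK);
  std::vector<Pose3> body_T_sensors(views.size(), body_T_sensor);

  // One 2D measurement per view; the factor can then recover each camera pose
  // as world_P_sensor = world_P_body * body_P_sensor before reprojecting the
  // triangulated landmark, matching wTb = cam.pose() * body_T_sensor.inverse()
  // in the test.
  factor.add(measurements_cam1, views, sharedKs, body_T_sensors);

  // Calibration-only overload, as in TEST(SmartProjectionFactorP, noisy);
  // presumably equivalent to passing Pose3::identity() for every extrinsic:
  // factor.add(measurements_cam1, views, sharedKs);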