translation recovery unit tests pass

release/4.3a0
akrishnan86 2020-11-30 00:30:19 -08:00
parent 4bc250e7c0
commit 8d009c2fcf
3 changed files with 132 additions and 62 deletions

gtsam/sfm/TranslationRecovery.cpp

@@ -11,7 +11,7 @@
/**
* @file TranslationRecovery.cpp
- * @author Frank Dellaert
+ * @author Frank Dellaert, Akshay Krishnan
* @date March 2020
* @brief Source code for recovering translations when rotations are given
*/

gtsam/sfm/TranslationRecovery.h

@@ -16,16 +16,16 @@
* @brief Recovering translations in an epipolar graph when rotations are given.
*/
+ #include <map>
+ #include <set>
+ #include <utility>
+ #include <vector>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/BinaryMeasurement.h>
- #include <utility>
- #include <vector>
- #include <set>
- #include <map>
namespace gtsam {
// Set up an optimization problem for the unknown translations Ti in the world
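The relation each edge of this optimization encodes can be sketched as follows (a reading of the doc comments and tests in this commit, not code quoted from it): a measured direction w_aZb between keys a and b constrains the unknown translations Ta and Tb via

    w_aZb ≈ (Tb - Ta) / ||Tb - Ta||

Directions constrain the translations only up to a global shift and scale, which is why run() takes a scale argument and the tests below treat the first two recovered translations as determined by the prior.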
@@ -63,14 +63,15 @@ class TranslationRecovery {
* @brief Construct a new Translation Recovery object
*
* @param relativeTranslations the relative translations, in world coordinate
- * frames, vector of BinaryMeasurements of Unit3, where each key of a measurement
- * is a point in 3D.
+ * frames, vector of BinaryMeasurements of Unit3, where each key of a
+ * measurement is a point in 3D.
* @param lmParams (optional) gtsam::LevenbergMarquardtParams that can be
* used to modify the parameters for the LM optimizer. By default, uses the
* default LM parameters.
*/
- TranslationRecovery(const TranslationEdges &relativeTranslations,
- const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams());
+ TranslationRecovery(
+ const TranslationEdges &relativeTranslations,
+ const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams());
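A minimal usage sketch of this constructor; the keys, directions, isotropic noise model, and the setMaxIterations override are illustrative assumptions, not values taken from this commit:

#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
#include <gtsam/sfm/TranslationRecovery.h>

int main() {
  using namespace gtsam;
  // Two direction-only measurements, between keys 0 -> 1 and 0 -> 2.
  const auto noise = noiseModel::Isotropic::Sigma(3, 0.01);  // assumed model
  std::vector<BinaryMeasurement<Unit3>> relativeTranslations;
  relativeTranslations.emplace_back(0, 1, Unit3(1, 0, 0), noise);
  relativeTranslations.emplace_back(0, 2, Unit3(0, 1, 0), noise);

  // Optional second argument: customized Levenberg-Marquardt settings.
  LevenbergMarquardtParams lmParams;
  lmParams.setMaxIterations(50);

  TranslationRecovery algorithm(relativeTranslations, lmParams);
  const Values result = algorithm.run(/*scale=*/1.0);  // key 0 pinned at origin
  return 0;
}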
/**
* @brief Build the factor graph to do the optimization.
@@ -110,8 +111,8 @@ class TranslationRecovery {
*
* @param poses SE(3) ground truth poses stored as Values
* @param edges pairs (a,b) for which a measurement w_aZb will be generated.
* @return TranslationEdges vector of binary measurements where the keys are
* the cameras and the measurement is the simulated Unit3 translation
* direction between the cameras.
*/
static TranslationEdges SimulateMeasurements(
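The SimulateMeasurements declaration is cut off at the hunk boundary above; a sketch of how the tests below call it, with ground-truth poses in a Values and one (a, b) key pair per requested measurement:

#include <gtsam/geometry/Pose3.h>
#include <gtsam/sfm/TranslationRecovery.h>

void SimulateTwoCameras() {
  using namespace gtsam;
  // Ground-truth poses, mirroring TwoPointTest below.
  Values poses;
  poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
  poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
  // One simulated measurement for edge (0, 1); its measured() direction
  // here is Unit3(1, 0, 0), the ground-truth direction from 0 to 1.
  const auto relativeTranslations =
      TranslationRecovery::SimulateMeasurements(poses, {{0, 1}});
}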

gtsam/sfm/tests/testTranslationRecovery.cpp

@@ -11,18 +11,29 @@
/**
* @file testTranslationRecovery.cpp
- * @author Frank Dellaert
+ * @author Frank Dellaert, Akshay Krishnan
* @date March 2020
* @brief test recovering translations when rotations are given.
*/
#include <CppUnitLite/TestHarness.h>
#include <gtsam/sfm/TranslationRecovery.h>
#include <gtsam/slam/dataset.h>
using namespace std;
using namespace gtsam;
+ // Returns the Unit3 direction as measured in the binary measurement, but
+ // computed from the input poses. Helper function used in the unit tests.
+ Unit3 GetDirectionFromPoses(const Values& poses,
+ const BinaryMeasurement<Unit3>& unitTranslation) {
+ const Pose3 wTa = poses.at<Pose3>(unitTranslation.key1()),
+ wTb = poses.at<Pose3>(unitTranslation.key2());
+ const Point3 Ta = wTa.translation(), Tb = wTb.translation();
+ return Unit3(Tb - Ta);
+ }
/* ************************************************************************* */
// We read the BAL file, which has 3 cameras in it, with poses. We then assume
// the rotations are correct, but translations have to be estimated from
@@ -47,30 +58,25 @@ TEST(TranslationRecovery, BAL) {
const auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
poses, {{0, 1}, {0, 2}, {1, 2}});
- // Check
- Unit3 w_aZb_stored; // measurement between 0 and 1 stored for next unit test
+ // Check simulated measurements.
for (auto& unitTranslation : relativeTranslations) {
- const Pose3 wTa = poses.at<Pose3>(unitTranslation.key1()),
- wTb = poses.at<Pose3>(unitTranslation.key2());
- const Point3 Ta = wTa.translation(), Tb = wTb.translation();
- const Unit3 w_aZb = unitTranslation.measured();
- EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb));
- if (unitTranslation.key1() == 0 && unitTranslation.key2() == 1) {
- w_aZb_stored = unitTranslation.measured();
- }
+ EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation),
+ unitTranslation.measured()));
}
TranslationRecovery algorithm(relativeTranslations);
const auto graph = algorithm.buildGraph();
EXPECT_LONGS_EQUAL(3, graph.size());
- // Translation recovery, version 1
+ // Run translation recovery
const double scale = 2.0;
const auto result = algorithm.run(scale);
// Check result for first two translations, determined by prior
EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
- EXPECT(assert_equal(Point3(2 * w_aZb_stored.point3()), result.at<Point3>(1)));
+ EXPECT(assert_equal(
+ Point3(2 * GetDirectionFromPoses(poses, relativeTranslations[0]).point3()),
+ result.at<Point3>(1)));
// Check that the third translation is correct
Point3 Ta = poses.at<Pose3>(0).translation();
@@ -83,53 +89,120 @@ TEST(TranslationRecovery, BAL) {
// EXPECT_DOUBLES_EQUAL(0.0199833, actualError, 1e-5);
}
- TEST(TranslationRecovery, ZeroRelativeTranslations) {
- // Create a graph with 3 cameras.
+ TEST(TranslationRecovery, TwoPointTest) {
+ // Create a dataset with 2 poses.
// __ __
// \/ \/
// 0 _____ 1
- // 2 <|
//
- // 0 and 1 face in the same direction but have a translation offset. 2 is at
- // the same point as 1 but is rotated, with very little FOV overlap.
+ // 0 and 1 face in the same direction but have a translation offset.
Values poses;
- poses.insert(0, Pose3(Rot3(), Point3()));
- poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0)));
- poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0)));
+ poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+ poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
auto relativeTranslations =
- TranslationRecovery::SimulateMeasurements(poses, {{0, 1}, {1, 2}});
+ TranslationRecovery::SimulateMeasurements(poses, {{0, 1}});
- // Check
+ // Check simulated measurements.
for (auto& unitTranslation : relativeTranslations) {
- const Pose3 wTa = poses.at<Pose3>(unitTranslation.key1()),
- wTb = poses.at<Pose3>(unitTranslation.key2());
- const Point3 Ta = wTa.translation(), Tb = wTb.translation();
- const Unit3 w_aZb = unitTranslation.measured();
- EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb));
+ EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation),
+ unitTranslation.measured()));
}
TranslationRecovery algorithm(relativeTranslations);
const auto graph = algorithm.buildGraph();
EXPECT_LONGS_EQUAL(1, graph.size());
- // Translation recovery, version 1
- const double scale = 2.0;
- const auto result = algorithm.run(scale);
+ // Run translation recovery
+ const auto result = algorithm.run(/*scale=*/2.0);
// Check result for first two translations, determined by prior
EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1)));
}
+ TEST(TranslationRecovery, ThreePointTest) {
+ // Create a dataset with 3 poses.
+ // __ __
+ // \/ \/
+ // 0 _____ 1
+ // \ __ /
+ // \\//
+ // 3
+ //
+ // 0 and 1 face in the same direction but have a translation offset. 3 is in
+ // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.
+ Values poses;
+ poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+ poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
+ poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));
+ auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
+ poses, {{0, 1}, {1, 3}, {3, 0}});
+ // Check simulated measurements.
+ for (auto& unitTranslation : relativeTranslations) {
+ EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation),
+ unitTranslation.measured()));
+ }
+ TranslationRecovery algorithm(relativeTranslations);
+ const auto graph = algorithm.buildGraph();
+ EXPECT_LONGS_EQUAL(3, graph.size());
+ const auto result = algorithm.run(/*scale=*/2.0);
+ // Check result
+ EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
+ EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1)));
+ EXPECT(assert_equal(Point3(1, -1, 0), result.at<Point3>(3)));
+ }
+ TEST(TranslationRecovery, TwoPointsAndZeroTranslation) {
+ // Create a dataset with 3 poses.
+ // __ __
+ // \/ \/
+ // 0 _____ 1
+ // 2 <|
+ //
+ // 0 and 1 face in the same direction but have a translation offset. 2 is at
+ // the same point as 1 but is rotated, with little FOV overlap.
+ Values poses;
+ poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+ poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
+ poses.insert<Pose3>(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0)));
+ auto relativeTranslations =
+ TranslationRecovery::SimulateMeasurements(poses, {{0, 1}, {1, 2}});
+ // Check simulated measurements.
+ for (auto& unitTranslation : relativeTranslations) {
+ EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation),
+ unitTranslation.measured()));
+ }
+ TranslationRecovery algorithm(relativeTranslations);
+ const auto graph = algorithm.buildGraph();
+ // There is only 1 non-zero translation edge.
+ EXPECT_LONGS_EQUAL(1, graph.size());
+ // Run translation recovery
+ const auto result = algorithm.run(/*scale=*/2.0);
+ // Check result
+ EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
+ EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1)));
+ EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(2)));
+ }
- TEST(TranslationRecovery, ZeroRelativeTranslations4Cameras) {
- // Create a graph with 4 cameras.
+ TEST(TranslationRecovery, ThreePointsAndZeroTranslation) {
+ // Create a dataset with 4 poses.
// __ __
// \/ \/
// 0 _____ 1
// \ 2 <|
// \ /
// \ __ 2 <|
// \\//
// 3
//
// 0 and 1 face in the same direction but have a translation offset. 2 is at
@@ -137,32 +210,28 @@ TEST(TranslationRecovery, ZeroRelativeTranslations4Cameras) {
// the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.
Values poses;
- poses.insert(0, Pose3(Rot3(), Point3()));
- poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0)));
- poses.insert(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0)));
- poses.insert(3, Pose3(Rot3(), Point3(1, -1, 0)));
+ poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+ poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
+ poses.insert<Pose3>(2, Pose3(Rot3::RzRyRx(-M_PI / 2, 0, 0), Point3(2, 0, 0)));
+ poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));
auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
poses, {{0, 1}, {1, 2}, {1, 3}, {3, 0}});
- // Check
+ // Check simulated measurements.
for (auto& unitTranslation : relativeTranslations) {
- const Pose3 wTa = poses.at<Pose3>(unitTranslation.key1()),
- wTb = poses.at<Pose3>(unitTranslation.key2());
- const Point3 Ta = wTa.translation(), Tb = wTb.translation();
- const Unit3 w_aZb = unitTranslation.measured();
- EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb));
+ EXPECT(assert_equal(GetDirectionFromPoses(poses, unitTranslation),
+ unitTranslation.measured()));
}
TranslationRecovery algorithm(relativeTranslations);
const auto graph = algorithm.buildGraph();
EXPECT_LONGS_EQUAL(3, graph.size());
- // Translation recovery, version 1
- const double scale = 2.0;
- const auto result = algorithm.run(scale);
+ // Run translation recovery
+ const auto result = algorithm.run(/*scale=*/2.0);
- // Check result for first two translations, determined by prior
+ // Check result
EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1)));
EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(2)));