Merge remote-tracking branch 'origin/develop' into fan/prototype-hybrid-tr

release/4.3a0
Fan Jiang 2022-05-21 13:56:41 -07:00
commit d5fd279449
32 changed files with 970 additions and 363 deletions

.gitignore vendored (1 change)
View File

@@ -18,3 +18,4 @@
CMakeLists.txt.user* CMakeLists.txt.user*
xcode/ xcode/
/Dockerfile /Dockerfile
/python/gtsam/notebooks/.ipynb_checkpoints/ellipses-checkpoint.ipynb

View File

@@ -10,7 +10,7 @@ endif()
set (GTSAM_VERSION_MAJOR 4) set (GTSAM_VERSION_MAJOR 4)
set (GTSAM_VERSION_MINOR 2) set (GTSAM_VERSION_MINOR 2)
set (GTSAM_VERSION_PATCH 0) set (GTSAM_VERSION_PATCH 0)
set (GTSAM_PRERELEASE_VERSION "a6") set (GTSAM_PRERELEASE_VERSION "a7")
math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}") math (EXPR GTSAM_VERSION_NUMERIC "10000 * ${GTSAM_VERSION_MAJOR} + 100 * ${GTSAM_VERSION_MINOR} + ${GTSAM_VERSION_PATCH}")
if (${GTSAM_VERSION_PATCH} EQUAL 0) if (${GTSAM_VERSION_PATCH} EQUAL 0)

View File

@@ -16,6 +16,7 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR}) set(Python_VERSION_MAJOR ${PYTHON_VERSION_MAJOR})
set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR}) set(Python_VERSION_MINOR ${PYTHON_VERSION_MINOR})
set(Python_VERSION_PATCH ${PYTHON_VERSION_PATCH})
set(Python_EXECUTABLE ${PYTHON_EXECUTABLE}) set(Python_EXECUTABLE ${PYTHON_EXECUTABLE})
else() else()
@@ -31,11 +32,12 @@ if(GTSAM_BUILD_PYTHON OR GTSAM_INSTALL_MATLAB_TOOLBOX)
set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR}) set(Python_VERSION_MAJOR ${Python3_VERSION_MAJOR})
set(Python_VERSION_MINOR ${Python3_VERSION_MINOR}) set(Python_VERSION_MINOR ${Python3_VERSION_MINOR})
set(Python_VERSION_PATCH ${Python3_VERSION_PATCH})
endif() endif()
set(GTSAM_PYTHON_VERSION set(GTSAM_PYTHON_VERSION
"${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}" "${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}.${Python_VERSION_PATCH}"
CACHE STRING "The version of Python to build the wrappers against." CACHE STRING "The version of Python to build the wrappers against."
FORCE) FORCE)

View File

@@ -415,16 +415,16 @@ TEST(DiscreteFactorGraph, DotWithNames) {
"graph {\n" "graph {\n"
" size=\"5,5\";\n" " size=\"5,5\";\n"
"\n" "\n"
" varC[label=\"C\"];\n" " var0[label=\"C\"];\n"
" varA[label=\"A\"];\n" " var1[label=\"A\"];\n"
" varB[label=\"B\"];\n" " var2[label=\"B\"];\n"
"\n" "\n"
" factor0[label=\"\", shape=point];\n" " factor0[label=\"\", shape=point];\n"
" varC--factor0;\n" " var0--factor0;\n"
" varA--factor0;\n" " var1--factor0;\n"
" factor1[label=\"\", shape=point];\n" " factor1[label=\"\", shape=point];\n"
" varC--factor1;\n" " var0--factor1;\n"
" varB--factor1;\n" " var2--factor1;\n"
"}\n"; "}\n";
EXPECT(actual == expected); EXPECT(actual == expected);
} }

View File

@@ -1088,58 +1088,149 @@ class StereoCamera {
}; };
#include <gtsam/geometry/triangulation.h> #include <gtsam/geometry/triangulation.h>
class TriangulationResult {
enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT };
Status status;
TriangulationResult(const gtsam::Point3& p);
const gtsam::Point3& get() const;
static TriangulationResult Degenerate();
static TriangulationResult Outlier();
static TriangulationResult FarPoint();
static TriangulationResult BehindCamera();
bool valid() const;
bool degenerate() const;
bool outlier() const;
bool farPoint() const;
bool behindCamera() const;
};
class TriangulationParameters {
double rankTolerance;
bool enableEPI;
double landmarkDistanceThreshold;
double dynamicOutlierRejectionThreshold;
SharedNoiseModel noiseModel;
TriangulationParameters(const double rankTolerance = 1.0,
const bool enableEPI = false,
double landmarkDistanceThreshold = -1,
double dynamicOutlierRejectionThreshold = -1,
const gtsam::SharedNoiseModel& noiseModel = nullptr);
};
// Templates appear not yet supported for free functions - issue raised at // Templates appear not yet supported for free functions - issue raised at
// borglab/wrap#14 to add support // borglab/wrap#14 to add support
// Cal3_S2 versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses, gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3_S2* sharedCal, gtsam::Cal3_S2* sharedCal,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize, double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr); const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3DS2* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3Bundler* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras, gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3_S2& cameras,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize, double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr); const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3_S2* sharedCal,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
const gtsam::CameraSetCal3_S2& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::TriangulationParameters& params);
// Cal3DS2 versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3DS2* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3DS2& cameras,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3DS2* sharedCal,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3DS2& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
const gtsam::CameraSetCal3DS2& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::TriangulationParameters& params);
// Cal3Bundler versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3Bundler* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras, gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Bundler& cameras,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize, double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr); const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3Bundler* sharedCal,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
const gtsam::CameraSetCal3Bundler& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::TriangulationParameters& params);
// Cal3Fisheye versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3Fisheye* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras, gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Fisheye& cameras,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize, double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr); const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3Fisheye* sharedCal,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Fisheye& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::TriangulationResult triangulateSafe(
const gtsam::CameraSetCal3Fisheye& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::TriangulationParameters& params);
// Cal3Unified versions
gtsam::Point3 triangulatePoint3(const gtsam::Pose3Vector& poses,
gtsam::Cal3Unified* sharedCal,
const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras, gtsam::Point3 triangulatePoint3(const gtsam::CameraSetCal3Unified& cameras,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
double rank_tol, bool optimize, double rank_tol, bool optimize,
const gtsam::SharedNoiseModel& model = nullptr); const gtsam::SharedNoiseModel& model = nullptr);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses,
gtsam::Cal3_S2* sharedCal, gtsam::Cal3Unified* sharedCal,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate); const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Unified& cameras,
gtsam::Cal3DS2* sharedCal,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate); const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::Pose3Vector& poses, gtsam::TriangulationResult triangulateSafe(
gtsam::Cal3Bundler* sharedCal, const gtsam::CameraSetCal3Unified& cameras,
const gtsam::Point2Vector& measurements, const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate); const gtsam::TriangulationParameters& params);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3_S2& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
gtsam::Point3 triangulateNonlinear(const gtsam::CameraSetCal3Bundler& cameras,
const gtsam::Point2Vector& measurements,
const gtsam::Point3& initialEstimate);
#include <gtsam/geometry/BearingRange.h> #include <gtsam/geometry/BearingRange.h>
template <POSE, POINT, BEARING, RANGE> template <POSE, POINT, BEARING, RANGE>
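
With the expanded wrapper above, triangulatePoint3, triangulateNonlinear, triangulateSafe, and TriangulationParameters are callable from Python for every calibration model. A minimal sketch of the safe-triangulation flow, mirroring the new Python test further down; the poses, calibration, and landmark values are invented for illustration:

import numpy as np
import gtsam
from gtsam import (Cal3_S2, CameraSetCal3_S2, PinholeCameraCal3_S2,
                   Point2Vector, Point3, Pose3, Rot3, TriangulationParameters)

# Two cameras a meter apart, both looking along the X-axis (illustrative values).
upright = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
pose1 = Pose3(upright, Point3(0, 0, 1))
pose2 = pose1.compose(Pose3(Rot3(), Point3(1, -1, 0)))
K = Cal3_S2(1500, 1200, 0, 640, 480)
cameras = CameraSetCal3_S2()
cameras.append(PinholeCameraCal3_S2(pose1, K))
cameras.append(PinholeCameraCal3_S2(pose2, K))

# Project a known landmark into both cameras, then triangulate it back.
landmark = Point3(5, 0.5, 1.2)
measurements = Point2Vector()
for i in range(len(cameras)):
    measurements.append(cameras[i].project(landmark))

# All defaults except landmarkDistanceThreshold; farther points become FAR_POINT.
params = TriangulationParameters(1.0, False, 10)
result = gtsam.triangulateSafe(cameras, measurements, params)
if result.valid():
    print("triangulated:", result.get())
elif result.farPoint():
    print("rejected: beyond landmarkDistanceThreshold")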

View File

@@ -23,6 +23,7 @@
#include <gtsam/geometry/Cal3Fisheye.h> #include <gtsam/geometry/Cal3Fisheye.h>
#include <gtsam/geometry/Cal3Unified.h> #include <gtsam/geometry/Cal3Unified.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Cal3DS2.h>
#include <gtsam/geometry/CameraSet.h> #include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/PinholeCamera.h> #include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/SphericalCamera.h> #include <gtsam/geometry/SphericalCamera.h>
@@ -510,18 +511,18 @@ private:
}; };
/** /**
* TriangulationResult is an optional point, along with the reasons why it is invalid. * TriangulationResult is an optional point, along with the reasons why it is
* invalid.
*/ */
class TriangulationResult : public boost::optional<Point3> { class TriangulationResult : public boost::optional<Point3> {
enum Status {
VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT
};
Status status_;
TriangulationResult(Status s) :
status_(s) {
}
public: public:
enum Status { VALID, DEGENERATE, BEHIND_CAMERA, OUTLIER, FAR_POINT };
Status status;
private:
TriangulationResult(Status s) : status(s) {}
public:
/** /**
* Default constructor, only for serialization * Default constructor, only for serialization
*/ */
@@ -530,54 +531,38 @@ public:
/** /**
* Constructor * Constructor
*/ */
TriangulationResult(const Point3& p) : TriangulationResult(const Point3& p) : status(VALID) { reset(p); }
status_(VALID) {
reset(p);
}
static TriangulationResult Degenerate() { static TriangulationResult Degenerate() {
return TriangulationResult(DEGENERATE); return TriangulationResult(DEGENERATE);
} }
static TriangulationResult Outlier() { static TriangulationResult Outlier() { return TriangulationResult(OUTLIER); }
return TriangulationResult(OUTLIER);
}
static TriangulationResult FarPoint() { static TriangulationResult FarPoint() {
return TriangulationResult(FAR_POINT); return TriangulationResult(FAR_POINT);
} }
static TriangulationResult BehindCamera() { static TriangulationResult BehindCamera() {
return TriangulationResult(BEHIND_CAMERA); return TriangulationResult(BEHIND_CAMERA);
} }
bool valid() const { bool valid() const { return status == VALID; }
return status_ == VALID; bool degenerate() const { return status == DEGENERATE; }
} bool outlier() const { return status == OUTLIER; }
bool degenerate() const { bool farPoint() const { return status == FAR_POINT; }
return status_ == DEGENERATE; bool behindCamera() const { return status == BEHIND_CAMERA; }
}
bool outlier() const {
return status_ == OUTLIER;
}
bool farPoint() const {
return status_ == FAR_POINT;
}
bool behindCamera() const {
return status_ == BEHIND_CAMERA;
}
// stream to output // stream to output
friend std::ostream& operator<<(std::ostream& os, friend std::ostream& operator<<(std::ostream& os,
const TriangulationResult& result) { const TriangulationResult& result) {
if (result) if (result)
os << "point = " << *result << std::endl; os << "point = " << *result << std::endl;
else else
os << "no point, status = " << result.status_ << std::endl; os << "no point, status = " << result.status << std::endl;
return os; return os;
} }
private: private:
/// Serialization function /// Serialization function
friend class boost::serialization::access; friend class boost::serialization::access;
template <class ARCHIVE> template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int version) { void serialize(ARCHIVE& ar, const unsigned int version) {
ar & BOOST_SERIALIZATION_NVP(status_); ar& BOOST_SERIALIZATION_NVP(status);
} }
}; };
@@ -644,6 +629,7 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
// Vector of Cameras - used by the Python/MATLAB wrapper // Vector of Cameras - used by the Python/MATLAB wrapper
using CameraSetCal3Bundler = CameraSet<PinholeCamera<Cal3Bundler>>; using CameraSetCal3Bundler = CameraSet<PinholeCamera<Cal3Bundler>>;
using CameraSetCal3_S2 = CameraSet<PinholeCamera<Cal3_S2>>; using CameraSetCal3_S2 = CameraSet<PinholeCamera<Cal3_S2>>;
using CameraSetCal3DS2 = CameraSet<PinholeCamera<Cal3DS2>>;
using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>; using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>;
using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>; using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>;
using CameraSetSpherical = CameraSet<SphericalCamera>; using CameraSetSpherical = CameraSet<SphericalCamera>;

View File

@@ -53,8 +53,9 @@ void BayesNet<CONDITIONAL>::dot(std::ostream& os,
auto frontals = conditional->frontals(); auto frontals = conditional->frontals();
const Key me = frontals.front(); const Key me = frontals.front();
auto parents = conditional->parents(); auto parents = conditional->parents();
for (const Key& p : parents) for (const Key& p : parents) {
os << " var" << keyFormatter(p) << "->var" << keyFormatter(me) << "\n"; os << " var" << p << "->var" << me << "\n";
}
} }
os << "}"; os << "}";

View File

@@ -43,7 +43,7 @@ void DotWriter::drawVariable(Key key, const KeyFormatter& keyFormatter,
const boost::optional<Vector2>& position, const boost::optional<Vector2>& position,
ostream* os) const { ostream* os) const {
// Label the node with the label from the KeyFormatter // Label the node with the label from the KeyFormatter
*os << " var" << keyFormatter(key) << "[label=\"" << keyFormatter(key) *os << " var" << key << "[label=\"" << keyFormatter(key)
<< "\""; << "\"";
if (position) { if (position) {
*os << ", pos=\"" << position->x() << "," << position->y() << "!\""; *os << ", pos=\"" << position->x() << "," << position->y() << "!\"";
@@ -65,13 +65,13 @@ void DotWriter::DrawFactor(size_t i, const boost::optional<Vector2>& position,
static void ConnectVariables(Key key1, Key key2, static void ConnectVariables(Key key1, Key key2,
const KeyFormatter& keyFormatter, ostream* os) { const KeyFormatter& keyFormatter, ostream* os) {
*os << " var" << keyFormatter(key1) << "--" *os << " var" << key1 << "--"
<< "var" << keyFormatter(key2) << ";\n"; << "var" << key2 << ";\n";
} }
static void ConnectVariableFactor(Key key, const KeyFormatter& keyFormatter, static void ConnectVariableFactor(Key key, const KeyFormatter& keyFormatter,
size_t i, ostream* os) { size_t i, ostream* os) {
*os << " var" << keyFormatter(key) << "--" *os << " var" << key << "--"
<< "factor" << i << ";\n"; << "factor" << i << ";\n";
} }

View File

@@ -337,7 +337,6 @@ vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
DSFVector dsf(n); DSFVector dsf(n);
size_t count = 0; size_t count = 0;
double sum = 0.0;
for (const size_t index : sortedIndices) { for (const size_t index : sortedIndices) {
const GaussianFactor &gf = *gfg[index]; const GaussianFactor &gf = *gfg[index];
const auto keys = gf.keys(); const auto keys = gf.keys();
@@ -347,7 +346,6 @@ vector<size_t> SubgraphBuilder::kruskal(const GaussianFactorGraph &gfg,
if (dsf.find(u) != dsf.find(v)) { if (dsf.find(u) != dsf.find(v)) {
dsf.merge(u, v); dsf.merge(u, v);
treeIndices.push_back(index); treeIndices.push_back(index);
sum += weights[index];
if (++count == n - 1) break; if (++count == n - 1) break;
} }
} }

View File

@@ -94,7 +94,6 @@ namespace gtsam {
Vector evaluateError(const T& x, boost::optional<Matrix&> H = boost::none) const override { Vector evaluateError(const T& x, boost::optional<Matrix&> H = boost::none) const override {
if (H) (*H) = Matrix::Identity(traits<T>::GetDimension(x),traits<T>::GetDimension(x)); if (H) (*H) = Matrix::Identity(traits<T>::GetDimension(x),traits<T>::GetDimension(x));
// manifold equivalent of z-x -> Local(x,z) // manifold equivalent of z-x -> Local(x,z)
// TODO(ASL) Add Jacobians.
return -traits<T>::Local(x, prior_); return -traits<T>::Local(x, prior_);
} }

View File

@@ -484,6 +484,9 @@ virtual class NonlinearOptimizerParams {
bool isSequential() const; bool isSequential() const;
bool isCholmod() const; bool isCholmod() const;
bool isIterative() const; bool isIterative() const;
// This only applies to python since matlab does not have lambda machinery.
gtsam::NonlinearOptimizerParams::IterationHook iterationHook;
}; };
bool checkConvergence(double relativeErrorTreshold, bool checkConvergence(double relativeErrorTreshold,
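
As the comment notes, the new iterationHook field is only usable from Python, where a callable can be assigned directly. A small sketch of wiring a hook into a Levenberg-Marquardt run, based on the optimizer test later in this commit:

import gtsam
from gtsam import Point2

graph = gtsam.NonlinearFactorGraph()
model = gtsam.noiseModel.Unit.Create(2)
graph.add(gtsam.PriorFactorPoint2(1, Point2(0, 0), model))

initial = gtsam.Values()
initial.insert(1, Point2(3, 3))

def report(iteration, error_before, error_after):
    # Called once per LM iteration with the total error before/after the step.
    print(f"iter {iteration}: {error_before:.3f} -> {error_after:.3f}")

params = gtsam.LevenbergMarquardtParams()
params.iterationHook = report
result = gtsam.LevenbergMarquardtOptimizer(graph, initial, params).optimize()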

View File

@@ -21,13 +21,16 @@
#include <gtsam/geometry/Pose3.h> #include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Unit3.h> #include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h> #include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/ExpressionFactor.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h> #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactor.h> #include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h> #include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h> #include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/TranslationFactor.h> #include <gtsam/sfm/TranslationFactor.h>
#include <gtsam/sfm/TranslationRecovery.h> #include <gtsam/sfm/TranslationRecovery.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h> #include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/expressions.h>
#include <set> #include <set>
#include <utility> #include <utility>
@@ -38,16 +41,13 @@ using namespace std;
// In Wrappers we have no access to this so have a default ready. // In Wrappers we have no access to this so have a default ready.
static std::mt19937 kRandomNumberGenerator(42); static std::mt19937 kRandomNumberGenerator(42);
TranslationRecovery::TranslationRecovery(
const TranslationRecovery::TranslationEdges &relativeTranslations,
const LevenbergMarquardtParams &lmParams)
: params_(lmParams) {
// Some relative translations may be zero. We treat nodes that have a zero // Some relative translations may be zero. We treat nodes that have a zero
// relativeTranslation as a single node. // relativeTranslation as a single node.
// A DSFMap is used to find sets of nodes that have a zero relative // A DSFMap is used to find sets of nodes that have a zero relative
// translation. Add the nodes in each edge to the DSFMap, and merge nodes that // translation. Add the nodes in each edge to the DSFMap, and merge nodes that
// are connected by a zero relative translation. // are connected by a zero relative translation.
DSFMap<Key> getSameTranslationDSFMap(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) {
DSFMap<Key> sameTranslationDSF; DSFMap<Key> sameTranslationDSF;
for (const auto &edge : relativeTranslations) { for (const auto &edge : relativeTranslations) {
Key key1 = sameTranslationDSF.find(edge.key1()); Key key1 = sameTranslationDSF.find(edge.key1());
@@ -56,94 +56,152 @@ TranslationRecovery::TranslationRecovery(
sameTranslationDSF.merge(key1, key2); sameTranslationDSF.merge(key1, key2);
} }
} }
// Use only those edges for which two keys have a distinct root in the DSFMap. return sameTranslationDSF;
for (const auto &edge : relativeTranslations) {
Key key1 = sameTranslationDSF.find(edge.key1());
Key key2 = sameTranslationDSF.find(edge.key2());
if (key1 == key2) continue;
relativeTranslations_.emplace_back(key1, key2, edge.measured(),
edge.noiseModel());
}
// Store the DSF map for post-processing results.
sameTranslationNodes_ = sameTranslationDSF.sets();
} }
NonlinearFactorGraph TranslationRecovery::buildGraph() const { // Removes zero-translation edges from measurements, and combines the nodes in
// these edges into a single node.
template <typename T>
std::vector<BinaryMeasurement<T>> removeSameTranslationNodes(
const std::vector<BinaryMeasurement<T>> &edges,
const DSFMap<Key> &sameTranslationDSFMap) {
std::vector<BinaryMeasurement<T>> newEdges;
for (const auto &edge : edges) {
Key key1 = sameTranslationDSFMap.find(edge.key1());
Key key2 = sameTranslationDSFMap.find(edge.key2());
if (key1 == key2) continue;
newEdges.emplace_back(key1, key2, edge.measured(), edge.noiseModel());
}
return newEdges;
}
// Adds nodes that were not optimized for because they were connected
// to another node with a zero-translation edge in the input.
Values addSameTranslationNodes(const Values &result,
const DSFMap<Key> &sameTranslationDSFMap) {
Values final_result = result;
// Nodes that were not optimized are stored in sameTranslationNodes_ as a map
// from a key that was optimized to keys that were not optimized. Iterate over
// map and add results for keys not optimized.
for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) {
Key optimizedKey = optimizedAndDuplicateKeys.first;
std::set<Key> duplicateKeys = optimizedAndDuplicateKeys.second;
// Add the result for the duplicate key if it does not already exist.
for (const Key duplicateKey : duplicateKeys) {
if (final_result.exists(duplicateKey)) continue;
final_result.insert<Point3>(duplicateKey,
final_result.at<Point3>(optimizedKey));
}
}
return final_result;
}
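
The zero-edge collapse in the two helpers above is plain union-find over keys. A hypothetical pure-Python sketch of the same idea (not GTSAM API, just for intuition):

def find(parent, k):
    # Iterative find for a dict-based union-find; no ranks, fine for a sketch.
    parent.setdefault(k, k)
    while parent[k] != k:
        k = parent[k]
    return k

# (key1, key2, direction) edges; direction None marks a zero translation.
edges = [(0, 1, None), (1, 2, (1, 0, 0)), (0, 2, (1, 0, 0))]

parent = {}
for k1, k2, d in edges:
    if d is None:  # merge endpoints joined by a zero-translation edge
        parent[find(parent, k1)] = find(parent, k2)

# Keep only edges whose endpoints have distinct roots, remapped to the roots.
kept = [(find(parent, k1), find(parent, k2), d)
        for k1, k2, d in edges
        if d is not None and find(parent, k1) != find(parent, k2)]
print(kept)  # keys 0 and 1 collapse, so both direction edges become root->2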
NonlinearFactorGraph TranslationRecovery::buildGraph(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) const {
NonlinearFactorGraph graph; NonlinearFactorGraph graph;
// Add all relative translation edges // Add translation factors for input translation directions.
for (auto edge : relativeTranslations_) { for (auto edge : relativeTranslations) {
graph.emplace_shared<TranslationFactor>(edge.key1(), edge.key2(), graph.emplace_shared<TranslationFactor>(edge.key1(), edge.key2(),
edge.measured(), edge.noiseModel()); edge.measured(), edge.noiseModel());
} }
return graph; return graph;
} }
void TranslationRecovery::addPrior( void TranslationRecovery::addPrior(
const double scale, NonlinearFactorGraph *graph, const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
const double scale,
const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
NonlinearFactorGraph *graph,
const SharedNoiseModel &priorNoiseModel) const { const SharedNoiseModel &priorNoiseModel) const {
auto edge = relativeTranslations_.begin(); auto edge = relativeTranslations.begin();
if (edge == relativeTranslations_.end()) return; if (edge == relativeTranslations.end()) return;
graph->emplace_shared<PriorFactor<Point3>>(edge->key1(), Point3(0, 0, 0), graph->emplace_shared<PriorFactor<Point3>>(edge->key1(), Point3(0, 0, 0),
priorNoiseModel); priorNoiseModel);
// Add between factors for optional relative translations.
for (auto edge : betweenTranslations) {
graph->emplace_shared<BetweenFactor<Point3>>(
edge.key1(), edge.key2(), edge.measured(), edge.noiseModel());
}
// Add a scale prior only if no other between factors were added.
if (betweenTranslations.empty()) {
graph->emplace_shared<PriorFactor<Point3>>( graph->emplace_shared<PriorFactor<Point3>>(
edge->key2(), scale * edge->measured().point3(), edge->noiseModel()); edge->key2(), scale * edge->measured().point3(), edge->noiseModel());
} }
}
Values TranslationRecovery::initializeRandomly(std::mt19937 *rng) const { Values TranslationRecovery::initializeRandomly(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
std::mt19937 *rng, const Values &initialValues) const {
uniform_real_distribution<double> randomVal(-1, 1); uniform_real_distribution<double> randomVal(-1, 1);
// Create a lambda expression that checks whether value exists and randomly // Create a lambda expression that checks whether value exists and randomly
// initializes if not. // initializes if not.
Values initial; Values initial;
auto insert = [&](Key j) { auto insert = [&](Key j) {
if (!initial.exists(j)) { if (initial.exists(j)) return;
if (initialValues.exists(j)) {
initial.insert<Point3>(j, initialValues.at<Point3>(j));
} else {
initial.insert<Point3>( initial.insert<Point3>(
j, Point3(randomVal(*rng), randomVal(*rng), randomVal(*rng))); j, Point3(randomVal(*rng), randomVal(*rng), randomVal(*rng)));
} }
// Assumes all nodes connected by zero-edges have the same initialization.
}; };
// Loop over measurements and add a random translation // Loop over measurements and add a random translation
for (auto edge : relativeTranslations_) { for (auto edge : relativeTranslations) {
insert(edge.key1()); insert(edge.key1());
insert(edge.key2()); insert(edge.key2());
} }
// If there are no valid edges, but zero-distance edges exist, initialize one
// of the nodes in a connected component of zero-distance edges.
if (initial.empty() && !sameTranslationNodes_.empty()) {
for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) {
Key optimizedKey = optimizedAndDuplicateKeys.first;
initial.insert<Point3>(optimizedKey, Point3(0, 0, 0));
}
}
return initial; return initial;
} }
Values TranslationRecovery::initializeRandomly() const { Values TranslationRecovery::initializeRandomly(
return initializeRandomly(&kRandomNumberGenerator); const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
const Values &initialValues) const {
return initializeRandomly(relativeTranslations, &kRandomNumberGenerator,
initialValues);
} }
Values TranslationRecovery::run(const double scale) const { Values TranslationRecovery::run(
auto graph = buildGraph(); const TranslationEdges &relativeTranslations, const double scale,
addPrior(scale, &graph); const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
const Values initial = initializeRandomly(); const Values &initialValues) const {
LevenbergMarquardtOptimizer lm(graph, initial, params_); // Find edges that have a zero-translation, and recompute relativeTranslations
Values result = lm.optimize(); // and betweenTranslations by retaining only one node for every zero-edge.
DSFMap<Key> sameTranslationDSFMap =
getSameTranslationDSFMap(relativeTranslations);
const TranslationEdges nonzeroRelativeTranslations =
removeSameTranslationNodes(relativeTranslations, sameTranslationDSFMap);
const std::vector<BinaryMeasurement<Point3>> nonzeroBetweenTranslations =
removeSameTranslationNodes(betweenTranslations, sameTranslationDSFMap);
// Nodes that were not optimized are stored in sameTranslationNodes_ as a map // Create graph of translation factors.
// from a key that was optimized to keys that were not optimized. Iterate over NonlinearFactorGraph graph = buildGraph(nonzeroRelativeTranslations);
// map and add results for keys not optimized.
for (const auto &optimizedAndDuplicateKeys : sameTranslationNodes_) { // Add global frame prior and scale (either from betweenTranslations or
// scale).
addPrior(nonzeroRelativeTranslations, scale, nonzeroBetweenTranslations,
&graph);
// Uses initial values from params if provided.
Values initial =
initializeRandomly(nonzeroRelativeTranslations, initialValues);
// If there are no valid edges, but zero-distance edges exist, initialize one
// of the nodes in a connected component of zero-distance edges.
if (initial.empty() && !sameTranslationDSFMap.sets().empty()) {
for (const auto &optimizedAndDuplicateKeys : sameTranslationDSFMap.sets()) {
Key optimizedKey = optimizedAndDuplicateKeys.first; Key optimizedKey = optimizedAndDuplicateKeys.first;
std::set<Key> duplicateKeys = optimizedAndDuplicateKeys.second; initial.insert<Point3>(optimizedKey, Point3(0, 0, 0));
// Add the result for the duplicate key if it does not already exist.
for (const Key duplicateKey : duplicateKeys) {
if (result.exists(duplicateKey)) continue;
result.insert<Point3>(duplicateKey, result.at<Point3>(optimizedKey));
} }
} }
return result;
LevenbergMarquardtOptimizer lm(graph, initial, lmParams_);
Values result = lm.optimize();
return addSameTranslationNodes(result, sameTranslationDSFMap);
} }
TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements( TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements(

View File

@@ -11,7 +11,7 @@
/** /**
* @file TranslationRecovery.h * @file TranslationRecovery.h
* @author Frank Dellaert * @author Frank Dellaert, Akshay Krishnan
* @date March 2020 * @date March 2020
* @brief Recovering translations in an epipolar graph when rotations are given. * @brief Recovering translations in an epipolar graph when rotations are given.
*/ */
@@ -57,68 +57,99 @@ class TranslationRecovery {
// Translation directions between camera pairs. // Translation directions between camera pairs.
TranslationEdges relativeTranslations_; TranslationEdges relativeTranslations_;
// Parameters used by the LM Optimizer. // Parameters.
LevenbergMarquardtParams params_; LevenbergMarquardtParams lmParams_;
// Map from a key in the graph to a set of keys that share the same
// translation.
std::map<Key, std::set<Key>> sameTranslationNodes_;
public: public:
/** /**
* @brief Construct a new Translation Recovery object * @brief Construct a new Translation Recovery object
* *
* @param relativeTranslations the relative translations, in world coordinate * @param lmParams parameters for optimization.
* frames, vector of BinaryMeasurements of Unit3, where each key of a
* measurement is a point in 3D.
* @param lmParams (optional) gtsam::LavenbergMarquardtParams that can be
* used to modify the parameters for the LM optimizer. By default, uses the
* default LM parameters.
*/ */
TranslationRecovery( TranslationRecovery(const LevenbergMarquardtParams &lmParams)
const TranslationEdges &relativeTranslations, : lmParams_(lmParams) {}
const LevenbergMarquardtParams &lmParams = LevenbergMarquardtParams());
/**
* @brief Default constructor.
*/
TranslationRecovery() = default;
/** /**
* @brief Build the factor graph to do the optimization. * @brief Build the factor graph to do the optimization.
* *
* @param relativeTranslations unit translation directions between
* translations to be estimated
* @return NonlinearFactorGraph * @return NonlinearFactorGraph
*/ */
NonlinearFactorGraph buildGraph() const; NonlinearFactorGraph buildGraph(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations) const;
/** /**
* @brief Add priors on ednpoints of first measurement edge. * @brief Add 3 factors to the graph:
* - A prior on the first point to lie at (0, 0, 0)
* - If betweenTranslations is non-empty, between factors provided by it.
* - If betweenTranslations is empty, a prior on scale of the first
* relativeTranslations edge.
* *
* @param relativeTranslations unit translation directions between
* translations to be estimated
* @param scale scale for first relative translation which fixes gauge. * @param scale scale for first relative translation which fixes gauge.
* @param graph factor graph to which prior is added. * @param graph factor graph to which prior is added.
* @param betweenTranslations relative translations (with scale) between 2
* points in world coordinate frame known a priori.
* @param priorNoiseModel the noise model to use with the prior. * @param priorNoiseModel the noise model to use with the prior.
*/ */
void addPrior(const double scale, NonlinearFactorGraph *graph, void addPrior(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
const double scale,
const std::vector<BinaryMeasurement<Point3>> &betweenTranslations,
NonlinearFactorGraph *graph,
const SharedNoiseModel &priorNoiseModel = const SharedNoiseModel &priorNoiseModel =
noiseModel::Isotropic::Sigma(3, 0.01)) const; noiseModel::Isotropic::Sigma(3, 0.01)) const;
/** /**
* @brief Create random initial translations. * @brief Create random initial translations.
* *
* @param relativeTranslations unit translation directions between
* translations to be estimated
* @param rng random number generator * @param rng random number generator
* @param initialValues (optional) initial values from a prior
* @return Values * @return Values
*/ */
Values initializeRandomly(std::mt19937 *rng) const; Values initializeRandomly(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
std::mt19937 *rng, const Values &initialValues = Values()) const;
/** /**
* @brief Version of initializeRandomly with a fixed seed. * @brief Version of initializeRandomly with a fixed seed.
* *
* @param relativeTranslations unit translation directions between
* translations to be estimated
* @param initialValues (optional) initial values from a prior
* @return Values * @return Values
*/ */
Values initializeRandomly() const; Values initializeRandomly(
const std::vector<BinaryMeasurement<Unit3>> &relativeTranslations,
const Values &initialValues = Values()) const;
/** /**
* @brief Build and optimize factor graph. * @brief Build and optimize factor graph.
* *
* @param relativeTranslations the relative translations, in world coordinate
* frames, vector of BinaryMeasurements of Unit3, where each key of a
* measurement is a point in 3D.
* @param scale scale for first relative translation which fixes gauge. * @param scale scale for first relative translation which fixes gauge.
* The scale is only used if betweenTranslations is empty.
* @param betweenTranslations relative translations (with scale) between 2
* points in world coordinate frame known a priori.
* @param initialValues initial values for optimization. Initializes randomly
* if not provided.
* @return Values * @return Values
*/ */
Values run(const double scale = 1.0) const; Values run(
const TranslationEdges &relativeTranslations, const double scale = 1.0,
const std::vector<BinaryMeasurement<Point3>> &betweenTranslations = {},
const Values &initialValues = Values()) const;
/** /**
* @brief Simulate translation direction measurements * @brief Simulate translation direction measurements

View File

@@ -4,6 +4,8 @@
namespace gtsam { namespace gtsam {
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/SfmTrack.h> #include <gtsam/sfm/SfmTrack.h>
class SfmTrack { class SfmTrack {
SfmTrack(); SfmTrack();
@@ -88,6 +90,7 @@ class BinaryMeasurement {
typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3; typedef gtsam::BinaryMeasurement<gtsam::Unit3> BinaryMeasurementUnit3;
typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3; typedef gtsam::BinaryMeasurement<gtsam::Rot3> BinaryMeasurementRot3;
typedef gtsam::BinaryMeasurement<gtsam::Point3> BinaryMeasurementPoint3;
class BinaryMeasurementsUnit3 { class BinaryMeasurementsUnit3 {
BinaryMeasurementsUnit3(); BinaryMeasurementsUnit3();
@@ -96,6 +99,13 @@ class BinaryMeasurementsUnit3 {
void push_back(const gtsam::BinaryMeasurement<gtsam::Unit3>& measurement); void push_back(const gtsam::BinaryMeasurement<gtsam::Unit3>& measurement);
}; };
class BinaryMeasurementsPoint3 {
BinaryMeasurementsPoint3();
size_t size() const;
gtsam::BinaryMeasurement<gtsam::Point3> at(size_t idx) const;
void push_back(const gtsam::BinaryMeasurement<gtsam::Point3>& measurement);
};
class BinaryMeasurementsRot3 { class BinaryMeasurementsRot3 {
BinaryMeasurementsRot3(); BinaryMeasurementsRot3();
size_t size() const; size_t size() const;
@@ -268,15 +278,36 @@ class MFAS {
}; };
#include <gtsam/sfm/TranslationRecovery.h> #include <gtsam/sfm/TranslationRecovery.h>
class TranslationRecovery { class TranslationRecovery {
TranslationRecovery( TranslationRecovery(const gtsam::LevenbergMarquardtParams& lmParams);
TranslationRecovery(); // default params.
void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale,
const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
gtsam::NonlinearFactorGraph @graph,
const gtsam::SharedNoiseModel& priorNoiseModel) const;
void addPrior(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale,
const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
gtsam::NonlinearFactorGraph @graph) const;
gtsam::NonlinearFactorGraph buildGraph(
const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const;
gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const double scale,
const gtsam::BinaryMeasurementsPoint3& betweenTranslations,
const gtsam::Values& initialValues) const;
// default random initial values
gtsam::Values run(
const gtsam::BinaryMeasurementsUnit3& relativeTranslations, const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
const gtsam::LevenbergMarquardtParams& lmParams); const double scale,
TranslationRecovery( const gtsam::BinaryMeasurementsPoint3& betweenTranslations) const;
const gtsam::BinaryMeasurementsUnit3& // default empty betweenTranslations
relativeTranslations); // default LevenbergMarquardtParams gtsam::Values run(const gtsam::BinaryMeasurementsUnit3& relativeTranslations,
gtsam::Values run(const double scale) const; const double scale) const;
gtsam::Values run() const; // default scale = 1.0 // default scale = 1.0, empty betweenTranslations
gtsam::Values run(
const gtsam::BinaryMeasurementsUnit3& relativeTranslations) const;
}; };
} // namespace gtsam } // namespace gtsam
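
A sketch of the updated wrapper API, with keys and measurements invented for illustration: run now takes the direction measurements directly, and a betweenTranslations measurement with known scale can pin the gauge instead of the scale prior (in which case the scale argument is unused):

import numpy as np
import gtsam

noise = gtsam.noiseModel.Isotropic.Sigma(3, 0.01)

# Unit translation directions between three points (illustrative geometry:
# point 0 at the origin, point 1 at (2,0,0), point 2 at (0,2,0)).
directions = gtsam.BinaryMeasurementsUnit3()
directions.append(gtsam.BinaryMeasurementUnit3(
    0, 1, gtsam.Unit3(np.array([1.0, 0.0, 0.0])), noise))
directions.append(gtsam.BinaryMeasurementUnit3(
    0, 2, gtsam.Unit3(np.array([0.0, 1.0, 0.0])), noise))
directions.append(gtsam.BinaryMeasurementUnit3(
    1, 2, gtsam.Unit3(np.array([-1.0, 1.0, 0.0])), noise))

# Optional: a relative translation with known scale fixes the gauge.
between = gtsam.BinaryMeasurementsPoint3()
between.append(gtsam.BinaryMeasurementPoint3(
    0, 1, gtsam.Point3(2, 0, 0), noise))

algorithm = gtsam.TranslationRecovery()  # default LM parameters
result = algorithm.run(directions, 1.0, between)
print(result.atPoint3(2))  # approximately (0, 2, 0)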

View File

@@ -223,6 +223,7 @@ parse3DFactors(const std::string &filename,
size_t maxIndex = 0); size_t maxIndex = 0);
using BinaryMeasurementsUnit3 = std::vector<BinaryMeasurement<Unit3>>; using BinaryMeasurementsUnit3 = std::vector<BinaryMeasurement<Unit3>>;
using BinaryMeasurementsPoint3 = std::vector<BinaryMeasurement<Point3>>;
using BinaryMeasurementsRot3 = std::vector<BinaryMeasurement<Rot3>>; using BinaryMeasurementsRot3 = std::vector<BinaryMeasurement<Rot3>>;
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42

View File

@@ -5,12 +5,16 @@
* @date Nov 4, 2014 * @date Nov 4, 2014
*/ */
#include <gtsam/base/Vector.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Vector.h>
#include <gtsam/navigation/ImuBias.h>
#include <gtsam/nonlinear/PriorFactor.h>
#include <gtsam/nonlinear/factorTesting.h>
using namespace std; using namespace std;
using namespace std::placeholders;
using namespace gtsam; using namespace gtsam;
using namespace imuBias;
/* ************************************************************************* */ /* ************************************************************************* */
@@ -28,11 +32,39 @@ TEST(PriorFactor, ConstructorVector3) {
// Constructor dynamic sized vector // Constructor dynamic sized vector
TEST(PriorFactor, ConstructorDynamicSizeVector) { TEST(PriorFactor, ConstructorDynamicSizeVector) {
Vector v(5); v << 1, 2, 3, 4, 5; Vector v(5);
v << 1, 2, 3, 4, 5;
SharedNoiseModel model = noiseModel::Isotropic::Sigma(5, 1.0); SharedNoiseModel model = noiseModel::Isotropic::Sigma(5, 1.0);
PriorFactor<Vector> factor(1, v, model); PriorFactor<Vector> factor(1, v, model);
} }
Vector callEvaluateError(const PriorFactor<ConstantBias>& factor,
const ConstantBias& bias) {
return factor.evaluateError(bias);
}
// Test for imuBias::ConstantBias
TEST(PriorFactor, ConstantBias) {
Vector3 biasAcc(1, 2, 3);
Vector3 biasGyro(0.1, 0.2, 0.3);
ConstantBias bias(biasAcc, biasGyro);
PriorFactor<ConstantBias> factor(1, bias,
noiseModel::Isotropic::Sigma(6, 0.1));
Values values;
values.insert(1, bias);
EXPECT_DOUBLES_EQUAL(0.0, factor.error(values), 1e-8);
EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
ConstantBias incorrectBias(
(Vector6() << 1.1, 2.1, 3.1, 0.2, 0.3, 0.4).finished());
values.clear();
values.insert(1, incorrectBias);
EXPECT_DOUBLES_EQUAL(3.0, factor.error(values), 1e-8);
EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}
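
The expected error of 3.0 in the new ConstantBias test follows directly from the noise model: each of the six bias components is off by 0.1 with sigma 0.1, and a factor's error is half the squared Mahalanobis norm. A quick numeric check:

import numpy as np

residual = np.full(6, 0.1)         # incorrectBias - bias, per component
whitened = residual / 0.1          # isotropic sigma of 0.1, so all ones
print(0.5 * np.sum(whitened**2))   # 0.5 * 6 = 3.0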
/* ************************************************************************* */ /* ************************************************************************* */
int main() { int main() {
TestResult tr; TestResult tr;

View File

@@ -104,16 +104,16 @@ TEST(SymbolicBayesNet, Dot) {
"digraph {\n" "digraph {\n"
" size=\"5,5\";\n" " size=\"5,5\";\n"
"\n" "\n"
" vara1[label=\"a1\", pos=\"1,2!\", shape=box];\n" " var6989586621679009793[label=\"a1\", pos=\"1,2!\", shape=box];\n"
" vara2[label=\"a2\", pos=\"2,2!\", shape=box];\n" " var6989586621679009794[label=\"a2\", pos=\"2,2!\", shape=box];\n"
" varx1[label=\"x1\", pos=\"1,1!\"];\n" " var8646911284551352321[label=\"x1\", pos=\"1,1!\"];\n"
" varx2[label=\"x2\", pos=\"2,1!\"];\n" " var8646911284551352322[label=\"x2\", pos=\"2,1!\"];\n"
" varx3[label=\"x3\", pos=\"3,1!\"];\n" " var8646911284551352323[label=\"x3\", pos=\"3,1!\"];\n"
"\n" "\n"
" varx1->varx2\n" " var8646911284551352321->var8646911284551352322\n"
" vara1->varx2\n" " var6989586621679009793->var8646911284551352322\n"
" varx2->varx3\n" " var8646911284551352322->var8646911284551352323\n"
" vara2->varx3\n" " var6989586621679009794->var8646911284551352323\n"
"}"); "}");
} }
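
The long node ids in the expected string are the raw 64-bit keys: a gtsam Symbol packs its character into the top byte and its index into the low bits, and the dot writers now use the unformatted key for node ids while the keyFormatter output appears only in labels. A quick check, assuming the symbol helper in the Python wrapper:

import gtsam

print(gtsam.symbol('a', 1))  # 6989586621679009793 == (ord('a') << 56) | 1
print(gtsam.symbol('x', 1))  # 8646911284551352321 == (ord('x') << 56) | 1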

View File

@@ -47,6 +47,7 @@ set(ignore
gtsam::Pose3Vector gtsam::Pose3Vector
gtsam::Rot3Vector gtsam::Rot3Vector
gtsam::KeyVector gtsam::KeyVector
gtsam::BinaryMeasurementsPoint3
gtsam::BinaryMeasurementsUnit3 gtsam::BinaryMeasurementsUnit3
gtsam::BinaryMeasurementsRot3 gtsam::BinaryMeasurementsRot3
gtsam::DiscreteKey gtsam::DiscreteKey
@@ -138,6 +139,7 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
gtsam::Pose3Vector gtsam::Pose3Vector
gtsam::KeyVector gtsam::KeyVector
gtsam::FixedLagSmootherKeyTimestampMapValue gtsam::FixedLagSmootherKeyTimestampMapValue
gtsam::BinaryMeasurementsPoint3
gtsam::BinaryMeasurementsUnit3 gtsam::BinaryMeasurementsUnit3
gtsam::BinaryMeasurementsRot3 gtsam::BinaryMeasurementsRot3
gtsam::CameraSetCal3_S2 gtsam::CameraSetCal3_S2
@@ -187,7 +189,7 @@ endif()
# Add custom target so we can install with `make python-install` # Add custom target so we can install with `make python-install`
set(GTSAM_PYTHON_INSTALL_TARGET python-install) set(GTSAM_PYTHON_INSTALL_TARGET python-install)
add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET} add_custom_target(${GTSAM_PYTHON_INSTALL_TARGET}
COMMAND ${PYTHON_EXECUTABLE} -m pip install --user . COMMAND ${PYTHON_EXECUTABLE} -m pip install .
DEPENDS ${GTSAM_PYTHON_DEPENDENCIES} DEPENDS ${GTSAM_PYTHON_DEPENDENCIES}
WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY}) WORKING_DIRECTORY ${GTSAM_PYTHON_BUILD_DIRECTORY})

View File

@@ -123,7 +123,7 @@ def estimate_poses(i_iZj_list: gtsam.BinaryMeasurementsUnit3,
w_iZj_inliers = filter_outliers(w_iZj_list) w_iZj_inliers = filter_outliers(w_iZj_list)
# Run the optimizer to obtain translations for normalized directions. # Run the optimizer to obtain translations for normalized directions.
wtc_values = gtsam.TranslationRecovery(w_iZj_inliers).run() wtc_values = gtsam.TranslationRecovery().run(w_iZj_inliers)
wTc_values = gtsam.Values() wTc_values = gtsam.Values()
for key in wRc_values.keys(): for key in wRc_values.keys():

View File

@@ -0,0 +1,133 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Ellipse Scaling\n",
"\n",
"The code to calculate the percentages included in ellipses with various values of \"k\" in `plot.py`.\n",
"\n",
"Thanks to @senselessDev, January 26, for providing the code in [PR #1067](https://github.com/borglab/gtsam/pull/1067)."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import scipy\n",
"import scipy.stats\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def pct_to_sigma(pct, dof):\n",
" return np.sqrt(scipy.stats.chi2.ppf(pct / 100., df=dof))\n",
"\n",
"def sigma_to_pct(sigma, dof):\n",
" return scipy.stats.chi2.cdf(sigma**2, df=dof) * 100."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0D\t 1 \t 2 \t 3 \t 4 \t 5 \n",
"1D\t68.26895%\t95.44997%\t99.73002%\t99.99367%\t99.99994%\n",
"2D\t39.34693%\t86.46647%\t98.88910%\t99.96645%\t99.99963%\n",
"3D\t19.87480%\t73.85359%\t97.07091%\t99.88660%\t99.99846%\n"
]
}
],
"source": [
"for dof in range(0, 4):\n",
" print(\"{}D\".format(dof), end=\"\")\n",
" for sigma in range(1, 6):\n",
" if dof == 0: print(\"\\t {} \".format(sigma), end=\"\")\n",
" else: print(\"\\t{:.5f}%\".format(sigma_to_pct(sigma, dof)), end=\"\")\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1D\n",
"\n",
"pct=50.0 -> sigma=0.674489750196\n",
"pct=95.0 -> sigma=1.959963984540\n",
"pct=99.0 -> sigma=2.575829303549\n",
"\n",
"2D\n",
"\n",
"pct=50.0 -> sigma=1.177410022515\n",
"pct=95.0 -> sigma=2.447746830681\n",
"pct=99.0 -> sigma=3.034854258770\n",
"\n",
"3D\n",
"\n",
"pct=50.0 -> sigma=1.538172254455\n",
"pct=95.0 -> sigma=2.795483482915\n",
"pct=99.0 -> sigma=3.368214175219\n",
"\n"
]
}
],
"source": [
"for dof in range(1, 4):\n",
" print(\"{}D\\n\".format(dof))\n",
" for pct in [50, 95, 99]:\n",
" print(\"pct={:.1f} -> sigma={:.12f}\".format(pct, pct_to_sigma(pct, dof)))\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"interpreter": {
"hash": "4d608302ba82e7596903db5446e6fa05f049271852e8cc6e1cafaafe5fbd9fed"
},
"kernelspec": {
"display_name": "Python 3.8.13 ('gtsfm-v1')",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.13"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
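
For the 2D row of the notebook's table the chi-squared CDF has a closed form, so the entries are easy to verify by hand: $P(\text{inside } k\sigma \text{ ellipse, 2D}) = F_{\chi^2_2}(k^2) = 1 - e^{-k^2/2}$, so $k = 1$ gives $1 - e^{-1/2} \approx 39.347\%$, matching the first 2D entry.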

View File

@@ -21,6 +21,8 @@ py::bind_vector<std::vector<gtsam::Pose3Pair>>(m_, "Pose3Pairs");
py::bind_vector<std::vector<gtsam::Pose3>>(m_, "Pose3Vector"); py::bind_vector<std::vector<gtsam::Pose3>>(m_, "Pose3Vector");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>>( py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3_S2>>>(
m_, "CameraSetCal3_S2"); m_, "CameraSetCal3_S2");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3DS2>>>(
m_, "CameraSetCal3DS2");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>>( py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Bundler>>>(
m_, "CameraSetCal3Bundler"); m_, "CameraSetCal3Bundler");
py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Unified>>>( py::bind_vector<gtsam::CameraSet<gtsam::PinholeCamera<gtsam::Cal3Unified>>>(

View File

@@ -11,6 +11,8 @@
* and saves one copy operation. * and saves one copy operation.
*/ */
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Point3> > >(
m_, "BinaryMeasurementsPoint3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >( py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Unit3> > >(
m_, "BinaryMeasurementsUnit3"); m_, "BinaryMeasurementsUnit3");
py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Rot3> > >( py::bind_vector<std::vector<gtsam::BinaryMeasurement<gtsam::Rot3> > >(

View File

@@ -11,12 +11,12 @@ Author: Frank Dellaert
# pylint: disable=no-name-in-module, invalid-name # pylint: disable=no-name-in-module, invalid-name
import unittest
import textwrap import textwrap
import unittest
import gtsam import gtsam
from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteFactorGraph, from gtsam import (DiscreteBayesNet, DiscreteConditional, DiscreteDistribution,
DiscreteKeys, DiscreteDistribution, DiscreteValues, Ordering) DiscreteFactorGraph, DiscreteKeys, DiscreteValues, Ordering)
from gtsam.utils.test_case import GtsamTestCase from gtsam.utils.test_case import GtsamTestCase
# Some keys: # Some keys:
@@ -152,10 +152,10 @@ class TestDiscreteBayesNet(GtsamTestCase):
var4[label="4"]; var4[label="4"];
var5[label="5"]; var5[label="5"];
var6[label="6"]; var6[label="6"];
vara0[label="a0", pos="0,2!"]; var6989586621679009792[label="a0", pos="0,2!"];
var4->var6 var4->var6
vara0->var3 var6989586621679009792->var3
var3->var5 var3->var5
var6->var5 var6->var5
}""" }"""

View File

@@ -15,12 +15,10 @@ from __future__ import print_function
import unittest import unittest
import gtsam import gtsam
from gtsam import (DoglegOptimizer, DoglegParams, from gtsam import (DoglegOptimizer, DoglegParams, DummyPreconditionerParameters,
DummyPreconditionerParameters, GaussNewtonOptimizer, GaussNewtonOptimizer, GaussNewtonParams, GncLMParams, GncLMOptimizer,
GaussNewtonParams, GncLMParams, GncLMOptimizer, LevenbergMarquardtOptimizer, LevenbergMarquardtParams, NonlinearFactorGraph,
LevenbergMarquardtOptimizer, LevenbergMarquardtParams, Ordering, PCGSolverParameters, Point2, PriorFactorPoint2, Values)
NonlinearFactorGraph, Ordering,
PCGSolverParameters, Point2, PriorFactorPoint2, Values)
from gtsam.utils.test_case import GtsamTestCase from gtsam.utils.test_case import GtsamTestCase
KEY1 = 1 KEY1 = 1
@@ -28,63 +26,83 @@ KEY2 = 2
class TestScenario(GtsamTestCase): class TestScenario(GtsamTestCase):
def test_optimize(self):
"""Do trivial test with three optimizer variants.""" """Do trivial test with three optimizer variants."""
fg = NonlinearFactorGraph()
def setUp(self):
"""Set up the optimization problem and ordering"""
# create graph
self.fg = NonlinearFactorGraph()
model = gtsam.noiseModel.Unit.Create(2) model = gtsam.noiseModel.Unit.Create(2)
fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model)) self.fg.add(PriorFactorPoint2(KEY1, Point2(0, 0), model))
# test error at minimum # test error at minimum
xstar = Point2(0, 0) xstar = Point2(0, 0)
optimal_values = Values() self.optimal_values = Values()
optimal_values.insert(KEY1, xstar) self.optimal_values.insert(KEY1, xstar)
self.assertEqual(0.0, fg.error(optimal_values), 0.0) self.assertEqual(0.0, self.fg.error(self.optimal_values), 0.0)
# test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 = # test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
x0 = Point2(3, 3) x0 = Point2(3, 3)
initial_values = Values() self.initial_values = Values()
initial_values.insert(KEY1, x0) self.initial_values.insert(KEY1, x0)
self.assertEqual(9.0, fg.error(initial_values), 1e-3) self.assertEqual(9.0, self.fg.error(self.initial_values), 1e-3)
# optimize parameters # optimize parameters
ordering = Ordering() self.ordering = Ordering()
ordering.push_back(KEY1) self.ordering.push_back(KEY1)
# Gauss-Newton def test_gauss_newton(self):
gnParams = GaussNewtonParams() gnParams = GaussNewtonParams()
gnParams.setOrdering(ordering) gnParams.setOrdering(self.ordering)
actual1 = GaussNewtonOptimizer(fg, initial_values, gnParams).optimize() actual = GaussNewtonOptimizer(self.fg, self.initial_values, gnParams).optimize()
self.assertAlmostEqual(0, fg.error(actual1)) self.assertAlmostEqual(0, self.fg.error(actual))
# Levenberg-Marquardt def test_levenberg_marquardt(self):
lmParams = LevenbergMarquardtParams.CeresDefaults() lmParams = LevenbergMarquardtParams.CeresDefaults()
lmParams.setOrdering(ordering) lmParams.setOrdering(self.ordering)
actual2 = LevenbergMarquardtOptimizer( actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
fg, initial_values, lmParams).optimize() self.assertAlmostEqual(0, self.fg.error(actual))
self.assertAlmostEqual(0, fg.error(actual2))
# Levenberg-Marquardt def test_levenberg_marquardt_pcg(self):
lmParams = LevenbergMarquardtParams.CeresDefaults() lmParams = LevenbergMarquardtParams.CeresDefaults()
lmParams.setLinearSolverType("ITERATIVE") lmParams.setLinearSolverType("ITERATIVE")
cgParams = PCGSolverParameters() cgParams = PCGSolverParameters()
cgParams.setPreconditionerParams(DummyPreconditionerParameters()) cgParams.setPreconditionerParams(DummyPreconditionerParameters())
lmParams.setIterativeParams(cgParams) lmParams.setIterativeParams(cgParams)
actual2 = LevenbergMarquardtOptimizer( actual = LevenbergMarquardtOptimizer(self.fg, self.initial_values, lmParams).optimize()
fg, initial_values, lmParams).optimize() self.assertAlmostEqual(0, self.fg.error(actual))
self.assertAlmostEqual(0, fg.error(actual2))
# Dogleg def test_dogleg(self):
dlParams = DoglegParams() dlParams = DoglegParams()
dlParams.setOrdering(ordering) dlParams.setOrdering(self.ordering)
actual3 = DoglegOptimizer(fg, initial_values, dlParams).optimize() actual = DoglegOptimizer(self.fg, self.initial_values, dlParams).optimize()
self.assertAlmostEqual(0, fg.error(actual3)) self.assertAlmostEqual(0, self.fg.error(actual))
# Graduated Non-Convexity (GNC) def test_graduated_non_convexity(self):
gncParams = GncLMParams() gncParams = GncLMParams()
actual4 = GncLMOptimizer(fg, initial_values, gncParams).optimize() actual = GncLMOptimizer(self.fg, self.initial_values, gncParams).optimize()
self.assertAlmostEqual(0, fg.error(actual4)) self.assertAlmostEqual(0, self.fg.error(actual))
def test_iteration_hook(self):
# set up iteration hook to track some testable values
iteration_count = 0
final_error = 0
final_values = None
def iteration_hook(iter, error_before, error_after):
nonlocal iteration_count, final_error, final_values
iteration_count = iter
final_error = error_after
final_values = optimizer.values()
# optimize
params = LevenbergMarquardtParams.CeresDefaults()
params.setOrdering(self.ordering)
params.iterationHook = iteration_hook
optimizer = LevenbergMarquardtOptimizer(self.fg, self.initial_values, params)
actual = optimizer.optimize()
self.assertAlmostEqual(0, self.fg.error(actual))
self.gtsamAssertEquals(final_values, actual)
self.assertEqual(self.fg.error(actual), final_error)
self.assertEqual(optimizer.iterations(), iteration_count)
if __name__ == "__main__": if __name__ == "__main__":
unittest.main() unittest.main()

View File

@@ -34,8 +34,10 @@ class TestTranslationRecovery(unittest.TestCase):
def test_constructor(self): def test_constructor(self):
"""Construct from binary measurements.""" """Construct from binary measurements."""
algorithm = gtsam.TranslationRecovery(gtsam.BinaryMeasurementsUnit3()) algorithm = gtsam.TranslationRecovery()
self.assertIsInstance(algorithm, gtsam.TranslationRecovery) self.assertIsInstance(algorithm, gtsam.TranslationRecovery)
algorithm_params = gtsam.TranslationRecovery(gtsam.LevenbergMarquardtParams())
self.assertIsInstance(algorithm_params, gtsam.TranslationRecovery)
def test_run(self): def test_run(self):
gt_poses = ExampleValues() gt_poses = ExampleValues()
@ -45,9 +47,9 @@ class TestTranslationRecovery(unittest.TestCase):
lmParams = gtsam.LevenbergMarquardtParams() lmParams = gtsam.LevenbergMarquardtParams()
lmParams.setVerbosityLM("silent") lmParams.setVerbosityLM("silent")
algorithm = gtsam.TranslationRecovery(measurements, lmParams) algorithm = gtsam.TranslationRecovery(lmParams)
scale = 2.0 scale = 2.0
result = algorithm.run(scale) result = algorithm.run(measurements, scale)
w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation() w_aTc = gt_poses.atPose3(2).translation() - gt_poses.atPose3(0).translation()
w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation() w_aTb = gt_poses.atPose3(1).translation() - gt_poses.atPose3(0).translation()
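
To summarize the API change exercised above: the measurements now go to `run` (and `buildGraph`) rather than the constructor, which takes only optional LM params. A minimal sketch (assuming `measurements` is an existing `gtsam.BinaryMeasurementsUnit3`):

```python
import gtsam

algorithm = gtsam.TranslationRecovery(gtsam.LevenbergMarquardtParams())
result = algorithm.run(measurements, 2.0)  # the scale argument fixes the gauge freedom
```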

View File

@@ -8,26 +8,17 @@ See LICENSE for the license information
 Test Triangulation
 Authors: Frank Dellaert & Fan Jiang (Python) & Sushmita Warrier & John Lambert
 """
+# pylint: disable=no-name-in-module, invalid-name, no-member
 import unittest
 from typing import Iterable, List, Optional, Tuple, Union

 import numpy as np

 import gtsam
-from gtsam import (
-    Cal3_S2,
-    Cal3Bundler,
-    CameraSetCal3_S2,
-    CameraSetCal3Bundler,
-    PinholeCameraCal3_S2,
-    PinholeCameraCal3Bundler,
-    Point2,
-    Point2Vector,
-    Point3,
-    Pose3,
-    Pose3Vector,
-    Rot3,
-)
+from gtsam import (Cal3_S2, Cal3Bundler, CameraSetCal3_S2,
+                   CameraSetCal3Bundler, PinholeCameraCal3_S2,
+                   PinholeCameraCal3Bundler, Point2, Point2Vector, Point3,
+                   Pose3, Pose3Vector, Rot3, TriangulationParameters,
+                   TriangulationResult)
 from gtsam.utils.test_case import GtsamTestCase

 UPRIGHT = Rot3.Ypr(-np.pi / 2, 0.0, -np.pi / 2)
@@ -218,6 +209,68 @@ class TestTriangulationExample(GtsamTestCase):
         # using the Huber loss we now have a quite small error!! nice!
         self.assertTrue(np.allclose(landmark, actual4, atol=0.05))

+    def test_outliers_and_far_landmarks(self) -> None:
+        """Check safe triangulation function."""
+        pose1, pose2 = self.poses
+
+        K1 = Cal3_S2(1500, 1200, 0, 640, 480)
+        # create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
+        camera1 = PinholeCameraCal3_S2(pose1, K1)
+
+        # create second camera 1 meter to the right of first camera
+        K2 = Cal3_S2(1600, 1300, 0, 650, 440)
+        camera2 = PinholeCameraCal3_S2(pose2, K2)
+
+        # 1. Project two landmarks into two cameras and triangulate
+        z1 = camera1.project(self.landmark)
+        z2 = camera2.project(self.landmark)
+
+        cameras = CameraSetCal3_S2()
+        measurements = Point2Vector()
+
+        cameras.append(camera1)
+        cameras.append(camera2)
+        measurements.append(z1)
+        measurements.append(z2)
+
+        landmarkDistanceThreshold = 10  # landmark is closer than that
+        # all default except landmarkDistanceThreshold:
+        params = TriangulationParameters(1.0, False, landmarkDistanceThreshold)
+        actual: TriangulationResult = gtsam.triangulateSafe(
+            cameras, measurements, params)
+        self.gtsamAssertEquals(actual.get(), self.landmark, 1e-2)
+        self.assertTrue(actual.valid())
+
+        landmarkDistanceThreshold = 4  # landmark is farther than that
+        params2 = TriangulationParameters(
+            1.0, False, landmarkDistanceThreshold)
+        actual = gtsam.triangulateSafe(cameras, measurements, params2)
+        self.assertTrue(actual.farPoint())
+
+        # 3. Add a slightly rotated third camera above with a wrong measurement
+        # (OUTLIER)
+        pose3 = pose1 * Pose3(Rot3.Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1))
+        K3 = Cal3_S2(700, 500, 0, 640, 480)
+        camera3 = PinholeCameraCal3_S2(pose3, K3)
+        z3 = camera3.project(self.landmark)
+
+        cameras.append(camera3)
+        measurements.append(z3 + Point2(10, -10))
+
+        landmarkDistanceThreshold = 10  # landmark is closer than that
+        outlierThreshold = 100  # loose, the outlier is going to pass
+        params3 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
+                                          outlierThreshold)
+        actual = gtsam.triangulateSafe(cameras, measurements, params3)
+        self.assertTrue(actual.valid())
+
+        # now set stricter threshold for outlier rejection
+        outlierThreshold = 5  # tighter, the outlier is not going to pass
+        params4 = TriangulationParameters(1.0, False, landmarkDistanceThreshold,
+                                          outlierThreshold)
+        actual = gtsam.triangulateSafe(cameras, measurements, params4)
+        self.assertTrue(actual.outlier())
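
`triangulateSafe` reports failures through the returned `TriangulationResult` status rather than throwing. A minimal dispatch sketch, using the same names as the test above:

```python
result = gtsam.triangulateSafe(cameras, measurements, params)
if result.valid():
    point = result.get()  # the triangulated gtsam.Point3
elif result.farPoint():
    pass  # landmark beyond landmarkDistanceThreshold
elif result.outlier():
    pass  # a reprojection error exceeded dynamicOutlierRejectionThreshold
```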
 if __name__ == "__main__":
     unittest.main()

View File

@@ -18,7 +18,7 @@ import numpy as np

 from gtsam import Rot3
 from gtsam.utils.test_case import GtsamTestCase
-from gtsam.utils.logging_optimizer import gtsam_optimize
+from gtsam.utils.logging_optimizer import gtsam_optimize, optimize_using

 KEY = 0
 MODEL = gtsam.noiseModel.Unit.Create(3)
@@ -34,19 +34,20 @@ class TestOptimizeComet(GtsamTestCase):
         rotations = {R, R.inverse()}  # mean is the identity
         self.expected = Rot3()

-        graph = gtsam.NonlinearFactorGraph()
-        for R in rotations:
-            graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
-        initial = gtsam.Values()
-        initial.insert(KEY, R)
-        self.params = gtsam.GaussNewtonParams()
-        self.optimizer = gtsam.GaussNewtonOptimizer(
-            graph, initial, self.params)
-
-        self.lmparams = gtsam.LevenbergMarquardtParams()
-        self.lmoptimizer = gtsam.LevenbergMarquardtOptimizer(
-            graph, initial, self.lmparams
-        )
+        def check(actual):
+            # Check that optimizing yields the identity
+            self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+            # Check that logging output prints out 3 lines (exact intermediate values differ by OS)
+            self.assertEqual(self.capturedOutput.getvalue().count('\n'), 3)
+            # reset stdout catcher
+            self.capturedOutput.truncate(0)
+        self.check = check
+
+        self.graph = gtsam.NonlinearFactorGraph()
+        for R in rotations:
+            self.graph.add(gtsam.PriorFactorRot3(KEY, R, MODEL))
+        self.initial = gtsam.Values()
+        self.initial.insert(KEY, R)

         # setup output capture
         self.capturedOutput = StringIO()
@@ -63,22 +64,29 @@ class TestOptimizeComet(GtsamTestCase):
         def hook(_, error):
             print(error)

-        # Only thing we require from optimizer is an iterate method
-        gtsam_optimize(self.optimizer, self.params, hook)
-
-        # Check that optimizing yields the identity.
-        actual = self.optimizer.values()
-        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+        # Wrapper function sets the hook and calls optimizer.optimize() for us.
+        params = gtsam.GaussNewtonParams()
+        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial)
+        self.check(actual)
+        actual = optimize_using(gtsam.GaussNewtonOptimizer, hook, self.graph, self.initial, params)
+        self.check(actual)
+        actual = gtsam_optimize(gtsam.GaussNewtonOptimizer(self.graph, self.initial, params),
+                                params, hook)
+        self.check(actual)

     def test_lm_simple_printing(self):
         """Make sure we are properly terminating LM"""
         def hook(_, error):
             print(error)

-        gtsam_optimize(self.lmoptimizer, self.lmparams, hook)
-
-        actual = self.lmoptimizer.values()
-        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected, tol=1e-6)
+        params = gtsam.LevenbergMarquardtParams()
+        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial)
+        self.check(actual)
+        actual = optimize_using(gtsam.LevenbergMarquardtOptimizer, hook, self.graph, self.initial,
+                                params)
+        self.check(actual)
+        actual = gtsam_optimize(gtsam.LevenbergMarquardtOptimizer(self.graph, self.initial, params),
+                                params, hook)

     @unittest.skip("Not a test we want run every time, as needs comet.ml account")
     def test_comet(self):

View File

@@ -6,6 +6,53 @@ Author: Jing Wu and Frank Dellaert

 from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
 import gtsam
+from typing import Any, Callable
+
+OPTIMIZER_PARAMS_MAP = {
+    gtsam.GaussNewtonOptimizer: gtsam.GaussNewtonParams,
+    gtsam.LevenbergMarquardtOptimizer: gtsam.LevenbergMarquardtParams,
+    gtsam.DoglegOptimizer: gtsam.DoglegParams,
+    gtsam.GncGaussNewtonOptimizer: gtsam.GaussNewtonParams,
+    gtsam.GncLMOptimizer: gtsam.LevenbergMarquardtParams
+}
+
+
+def optimize_using(OptimizerClass, hook, *args) -> gtsam.Values:
+    """ Wraps the constructor and "optimize()" call for an Optimizer together and adds an iteration
+    hook.
+    Example usage:
+    ```python
+    def hook(optimizer, error):
+        print("iteration {:}, error = {:}".format(optimizer.iterations(), error))
+    solution = optimize_using(gtsam.GaussNewtonOptimizer, hook, graph, init, params)
+    ```
+    Iteration hook's args are (optimizer, error) and return type should be None
+
+    Args:
+        OptimizerClass (T): A NonlinearOptimizer class (e.g. GaussNewtonOptimizer,
+            LevenbergMarquardtOptimizer)
+        hook ([T, double] -> None): Function to callback after each iteration. Args are (optimizer,
+            error) and return should be None.
+        *args: Arguments that would be passed into the OptimizerClass constructor, usually:
+            graph, init, [params]
+    Returns:
+        (gtsam.Values): A Values object representing the optimization solution.
+    """
+    # Add the iteration hook to the NonlinearOptimizerParams
+    for arg in args:
+        if isinstance(arg, gtsam.NonlinearOptimizerParams):
+            arg.iterationHook = lambda iteration, error_before, error_after: hook(
+                optimizer, error_after)
+            break
+    else:
+        params = OPTIMIZER_PARAMS_MAP[OptimizerClass]()
+        params.iterationHook = lambda iteration, error_before, error_after: hook(
+            optimizer, error_after)
+        args = (*args, params)
+    # Construct Optimizer and optimize
+    optimizer = OptimizerClass(*args)
+    hook(optimizer, optimizer.error())  # Call hook once with init values to match behavior below
+    return optimizer.optimize()
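
When no params object appears in `*args`, the `for ... else` branch above builds a default one via `OPTIMIZER_PARAMS_MAP`. A minimal usage sketch of that path (assuming an existing `graph` and `init`):

```python
def hook(optimizer, error):
    print("iteration {:}, error = {:}".format(optimizer.iterations(), error))

# GncLMOptimizer is looked up in the map and gets default LevenbergMarquardtParams.
solution = optimize_using(gtsam.GncLMOptimizer, hook, graph, init)
```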
 def optimize(optimizer, check_convergence, hook):
@@ -21,7 +68,8 @@ def optimize(optimizer, check_convergence, hook):
     current_error = optimizer.error()
     hook(optimizer, current_error)

-    # Iterative loop
+    # Iterative loop. Cannot use `params.iterationHook` because we don't have access to params
+    # (backwards compatibility issue).
     while True:
         # Do next iteration
         optimizer.iterate()
@@ -36,6 +84,7 @@ def gtsam_optimize(optimizer,
                    params,
                    hook):
     """ Given an optimizer and params, iterate until convergence.
+    Recommend using optimize_using instead.
     After each iteration, hook(optimizer) is called.
     After the function, use values and errors to get the result.
     Arguments:

View File

@@ -12,13 +12,26 @@ from mpl_toolkits.mplot3d import Axes3D  # pylint: disable=unused-import
 import gtsam
 from gtsam import Marginals, Point2, Point3, Pose2, Pose3, Values

-# For future reference: following
-# https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/
-# we have, in 2D:
-# def kk(p): return math.sqrt(-2*math.log(1-p)) # k to get p probability mass
-# def pp(k): return 1-math.exp(-float(k**2)/2.0) # p as a function of k
-# Some values:
-# k = 5 => p = 99.9996 %
+# For translation between a scaling of the uncertainty ellipse and the
+# percentage of inliers see discussion in
+# [PR 1067](https://github.com/borglab/gtsam/pull/1067)
+# and the notebook python/gtsam/notebooks/ellipses.ipynb (needs scipy).
+#
+# In the following, the default scaling is chosen for 95% inliers, which
+# translates to the following sigma values:
+# 1D: 1.959963984540
+# 2D: 2.447746830681
+# 3D: 2.795483482915
+#
+# Further references are Stochastic Models, Estimation, and Control Vol 1 by Maybeck,
+# page 366 and https://www.xarg.org/2018/04/how-to-plot-a-covariance-error-ellipse/
+#
+# For reference, here are the inlier percentages for some sigma values:
+#          1        2        3        4        5
+# 1D 68.26895 95.44997 99.73002 99.99367 99.99994
+# 2D 39.34693 86.46647 98.88910 99.96645 99.99963
+# 3D 19.87480 73.85359 97.07091 99.88660 99.99846
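
The sigma values quoted in this comment block follow from the chi-squared inverse CDF. A short sketch of the derivation (assumes scipy, consistent with the ellipses.ipynb notebook mentioned above):

```python
import numpy as np
from scipy.stats import chi2

for dim in (1, 2, 3):
    k = np.sqrt(chi2.ppf(0.95, df=dim))  # radius scaling for 95% inliers
    print(f"{dim}D: {k:.12f}")  # 1.959963984540, 2.447746830681, 2.795483482915
```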
 def set_axes_equal(fignum: int) -> None:
     """
@@ -81,9 +94,8 @@ def plot_covariance_ellipse_3d(axes,
     """
     Plots a Gaussian as an uncertainty ellipse

-    Based on Maybeck Vol 1, page 366
-    k=2.296 corresponds to 1 std, 68.26% of all probability
-    k=11.82 corresponds to 3 std, 99.74% of all probability
+    The ellipse is scaled in such a way that 95% of drawn samples are inliers.
+    Derivation of the scaling factor is explained at the beginning of this file.

     Args:
         axes (matplotlib.axes.Axes): Matplotlib axes.
@@ -94,7 +106,8 @@ def plot_covariance_ellipse_3d(axes,
         n: Defines the granularity of the ellipse. Higher values indicate finer ellipses.
         alpha: Transparency value for the plotted surface in the range [0, 1].
     """
-    k = 11.82
+    # this corresponds to 95%, see note above
+    k = 2.795483482915
     U, S, _ = np.linalg.svd(P)

     radii = k * np.sqrt(S)
@@ -115,12 +128,48 @@ def plot_covariance_ellipse_3d(axes,
     axes.plot_surface(x, y, z, alpha=alpha, cmap='hot')
+def plot_covariance_ellipse_2d(axes,
+                               origin: Point2,
+                               covariance: np.ndarray) -> None:
+    """
+    Plots a Gaussian as an uncertainty ellipse
+
+    The ellipse is scaled in such a way that 95% of drawn samples are inliers.
+    Derivation of the scaling factor is explained at the beginning of this file.
+
+    Args:
+        axes (matplotlib.axes.Axes): Matplotlib axes.
+        origin: The origin in the world frame.
+        covariance: The marginal covariance matrix of the 2D point
+                    which will be represented as an ellipse.
+    """
+    w, v = np.linalg.eigh(covariance)
+    # this corresponds to 95%, see note above
+    k = 2.447746830681
+
+    angle = np.arctan2(v[1, 0], v[0, 0])
+    # We multiply k by 2 since k corresponds to the radius but Ellipse uses
+    # the diameter.
+    e1 = patches.Ellipse(origin,
+                         np.sqrt(w[0]) * 2 * k,
+                         np.sqrt(w[1]) * 2 * k,
+                         np.rad2deg(angle),
+                         fill=False)
+    axes.add_patch(e1)
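
A minimal usage sketch for the new helper (the covariance here is a made-up diagonal matrix; in practice it would come from `Marginals.marginalCovariance(key)`):

```python
import matplotlib.pyplot as plt
import numpy as np
from gtsam.utils.plot import plot_covariance_ellipse_2d

fig, ax = plt.subplots()
plot_covariance_ellipse_2d(ax, np.array([1.0, 2.0]), np.diag([0.01, 0.04]))
ax.set_xlim(0, 2)
ax.set_ylim(1, 3)
ax.set_aspect('equal')
plt.show()
```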
 def plot_point2_on_axes(axes,
                         point: Point2,
                         linespec: str,
                         P: Optional[np.ndarray] = None) -> None:
     """
-    Plot a 2D point on given axis `axes` with given `linespec`.
+    Plot a 2D point and its corresponding uncertainty ellipse on given axis
+    `axes` with given `linespec`.
+
+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.

     Args:
         axes (matplotlib.axes.Axes): Matplotlib axes.
@@ -130,19 +179,7 @@ def plot_point2_on_axes(axes,
     """
     axes.plot([point[0]], [point[1]], linespec, marker='.', markersize=10)
     if P is not None:
-        w, v = np.linalg.eig(P)
-
-        # 5 sigma corresponds to 99.9996%, see note above
-        k = 5.0
-
-        angle = np.arctan2(v[1, 0], v[0, 0])
-        e1 = patches.Ellipse(point,
-                             np.sqrt(w[0] * k),
-                             np.sqrt(w[1] * k),
-                             np.rad2deg(angle),
-                             fill=False)
-        axes.add_patch(e1)
+        plot_covariance_ellipse_2d(axes, point, P)
 def plot_point2(
         fignum: int,
@@ -154,6 +191,9 @@ def plot_point2(
     """
     Plot a 2D point on given figure with given `linespec`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.
+
     Args:
         fignum: Integer representing the figure number to use for plotting.
         point: The point to be plotted.
@@ -182,6 +222,9 @@ def plot_pose2_on_axes(axes,
     """
     Plot a 2D pose on given axis `axes` with given `axis_length`.

+    The ellipse is scaled in such a way that 95% of drawn samples are inliers,
+    see `plot_covariance_ellipse_2d`.
+
     Args:
         axes (matplotlib.axes.Axes): Matplotlib axes.
         pose: The pose to be plotted.
@@ -206,19 +249,7 @@ def plot_pose2_on_axes(axes,
     if covariance is not None:
         pPp = covariance[0:2, 0:2]
         gPp = np.matmul(np.matmul(gRp, pPp), gRp.T)
-
-        w, v = np.linalg.eig(gPp)
-
-        # 5 sigma corresponds to 99.9996%, see note above
-        k = 5.0
-
-        angle = np.arctan2(v[1, 0], v[0, 0])
-        e1 = patches.Ellipse(origin,
-                             np.sqrt(w[0] * k),
-                             np.sqrt(w[1] * k),
-                             np.rad2deg(angle),
-                             fill=False)
-        axes.add_patch(e1)
+        plot_covariance_ellipse_2d(axes, origin, gPp)


 def plot_pose2(
@@ -231,6 +262,9 @@ def plot_pose2(
     """
     Plot a 2D pose on given figure with given `axis_length`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_2d`.
+
     Args:
         fignum: Integer representing the figure number to use for plotting.
         pose: The pose to be plotted.
@@ -260,6 +294,9 @@ def plot_point3_on_axes(axes,
     """
     Plot a 3D point on given axis `axes` with given `linespec`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.
+
     Args:
         axes (matplotlib.axes.Axes): Matplotlib axes.
         point: The point to be plotted.
@@ -281,6 +318,9 @@ def plot_point3(
     """
     Plot a 3D point on given figure with given `linespec`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.
+
     Args:
         fignum: Integer representing the figure number to use for plotting.
         point: The point to be plotted.
@@ -355,6 +395,9 @@ def plot_pose3_on_axes(axes, pose, axis_length=0.1, P=None, scale=1):
     """
     Plot a 3D pose on given axis `axes` with given `axis_length`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.
+
     Args:
         axes (matplotlib.axes.Axes): Matplotlib axes.
         point (gtsam.Point3): The point to be plotted.
@@ -397,6 +440,9 @@ def plot_pose3(
     """
     Plot a 3D pose on given figure with given `axis_length`.

+    The uncertainty ellipse (if covariance is given) is scaled in such a way
+    that 95% of drawn samples are inliers, see `plot_covariance_ellipse_3d`.
+
     Args:
         fignum: Integer representing the figure number to use for plotting.
         pose (gtsam.Pose3): 3D pose to be plotted.

View File

@@ -335,21 +335,21 @@ TEST(NonlinearFactorGraph, dot) {
       "graph {\n"
       "  size=\"5,5\";\n"
       "\n"
-      "  varl1[label=\"l1\"];\n"
-      "  varx1[label=\"x1\"];\n"
-      "  varx2[label=\"x2\"];\n"
+      "  var7782220156096217089[label=\"l1\"];\n"
+      "  var8646911284551352321[label=\"x1\"];\n"
+      "  var8646911284551352322[label=\"x2\"];\n"
       "\n"
       "  factor0[label=\"\", shape=point];\n"
-      "  varx1--factor0;\n"
+      "  var8646911284551352321--factor0;\n"
       "  factor1[label=\"\", shape=point];\n"
-      "  varx1--factor1;\n"
-      "  varx2--factor1;\n"
+      "  var8646911284551352321--factor1;\n"
+      "  var8646911284551352322--factor1;\n"
       "  factor2[label=\"\", shape=point];\n"
-      "  varx1--factor2;\n"
-      "  varl1--factor2;\n"
+      "  var8646911284551352321--factor2;\n"
+      "  var7782220156096217089--factor2;\n"
       "  factor3[label=\"\", shape=point];\n"
-      "  varx2--factor3;\n"
-      "  varl1--factor3;\n"
+      "  var8646911284551352322--factor3;\n"
+      "  var7782220156096217089--factor3;\n"
       "}\n";

   const NonlinearFactorGraph fg = createNonlinearFactorGraph();
@@ -363,21 +363,21 @@ TEST(NonlinearFactorGraph, dot_extra) {
       "graph {\n"
       "  size=\"5,5\";\n"
      "\n"
-      "  varl1[label=\"l1\", pos=\"0,0!\"];\n"
-      "  varx1[label=\"x1\", pos=\"1,0!\"];\n"
-      "  varx2[label=\"x2\", pos=\"1,1.5!\"];\n"
+      "  var7782220156096217089[label=\"l1\", pos=\"0,0!\"];\n"
+      "  var8646911284551352321[label=\"x1\", pos=\"1,0!\"];\n"
+      "  var8646911284551352322[label=\"x2\", pos=\"1,1.5!\"];\n"
       "\n"
       "  factor0[label=\"\", shape=point];\n"
-      "  varx1--factor0;\n"
+      "  var8646911284551352321--factor0;\n"
       "  factor1[label=\"\", shape=point];\n"
-      "  varx1--factor1;\n"
-      "  varx2--factor1;\n"
+      "  var8646911284551352321--factor1;\n"
+      "  var8646911284551352322--factor1;\n"
       "  factor2[label=\"\", shape=point];\n"
-      "  varx1--factor2;\n"
-      "  varl1--factor2;\n"
+      "  var8646911284551352321--factor2;\n"
+      "  var7782220156096217089--factor2;\n"
       "  factor3[label=\"\", shape=point];\n"
-      "  varx2--factor3;\n"
-      "  varl1--factor3;\n"
+      "  var8646911284551352322--factor3;\n"
+      "  var7782220156096217089--factor3;\n"
       "}\n";

   const NonlinearFactorGraph fg = createNonlinearFactorGraph();
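
The long integers in the expected strings above are the raw 64-bit keys that `gtsam::Symbol` packs as (char << 56) | index; the dot writer now emits those keys instead of the formatted short names. A quick sanity check (assuming the usual Python wrapper):

```python
import gtsam

assert gtsam.Symbol('x', 1).key() == 8646911284551352321  # ord('x') << 56 | 1
assert gtsam.Symbol('l', 1).key() == 7782220156096217089  # ord('l') << 56 | 1
```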

View File

@@ -17,8 +17,8 @@
  */

 #include <CppUnitLite/TestHarness.h>
-#include <gtsam/sfm/TranslationRecovery.h>
 #include <gtsam/sfm/SfmData.h>
+#include <gtsam/sfm/TranslationRecovery.h>
 #include <gtsam/slam/dataset.h>

 using namespace std;
@@ -62,13 +62,13 @@ TEST(TranslationRecovery, BAL) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
+  TranslationRecovery algorithm;
+  const auto graph = algorithm.buildGraph(relativeTranslations);
   EXPECT_LONGS_EQUAL(3, graph.size());

   // Run translation recovery
   const double scale = 2.0;
-  const auto result = algorithm.run(scale);
+  const auto result = algorithm.run(relativeTranslations, scale);

   // Check result for first two translations, determined by prior
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
@@ -107,12 +107,12 @@ TEST(TranslationRecovery, TwoPoseTest) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
+  TranslationRecovery algorithm;
+  const auto graph = algorithm.buildGraph(relativeTranslations);
   EXPECT_LONGS_EQUAL(1, graph.size());

   // Run translation recovery
-  const auto result = algorithm.run(/*scale=*/3.0);
+  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

   // Check result for first two translations, determined by prior
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -145,11 +145,11 @@ TEST(TranslationRecovery, ThreePoseTest) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
+  TranslationRecovery algorithm;
+  const auto graph = algorithm.buildGraph(relativeTranslations);
   EXPECT_LONGS_EQUAL(3, graph.size());

-  const auto result = algorithm.run(/*scale=*/3.0);
+  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

   // Check result
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -180,13 +180,9 @@ TEST(TranslationRecovery, ThreePosesIncludingZeroTranslation) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
-  // There is only 1 non-zero translation edge.
-  EXPECT_LONGS_EQUAL(1, graph.size());
+  TranslationRecovery algorithm;

   // Run translation recovery
-  const auto result = algorithm.run(/*scale=*/3.0);
+  const auto result = algorithm.run(relativeTranslations, /*scale=*/3.0);

   // Check result
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -222,12 +218,10 @@ TEST(TranslationRecovery, FourPosesIncludingZeroTranslation) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
-  EXPECT_LONGS_EQUAL(3, graph.size());
+  TranslationRecovery algorithm;

   // Run translation recovery
-  const auto result = algorithm.run(/*scale=*/4.0);
+  const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0);

   // Check result
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -251,13 +245,10 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) {
         unitTranslation.measured()));
   }

-  TranslationRecovery algorithm(relativeTranslations);
-  const auto graph = algorithm.buildGraph();
-  // Graph size will be zero as there no 'non-zero distance' edges.
-  EXPECT_LONGS_EQUAL(0, graph.size());
+  TranslationRecovery algorithm;

   // Run translation recovery
-  const auto result = algorithm.run(/*scale=*/4.0);
+  const auto result = algorithm.run(relativeTranslations, /*scale=*/4.0);

   // Check result
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-8));
@@ -265,6 +256,73 @@ TEST(TranslationRecovery, ThreePosesWithZeroTranslation) {
   EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(2), 1e-8));
 }
+TEST(TranslationRecovery, ThreePosesWithOneSoftConstraint) {
+  // Create a dataset with 3 poses.
+  //   __   __
+  //   \/   \/
+  //   0 _____ 1
+  //    \ __ /
+  //     \\//
+  //      3
+  //
+  // 0 and 1 face in the same direction but have a translation offset. 3 is in
+  // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.
+
+  Values poses;
+  poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+  poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
+  poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));
+
+  auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
+      poses, {{0, 1}, {0, 3}, {1, 3}});
+
+  std::vector<BinaryMeasurement<Point3>> betweenTranslations;
+  betweenTranslations.emplace_back(0, 3, Point3(1, -1, 0),
+                                   noiseModel::Isotropic::Sigma(3, 1e-2));
+
+  TranslationRecovery algorithm;
+  auto result =
+      algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations);
+
+  // Check result
+  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-4));
+  EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1), 1e-4));
+  EXPECT(assert_equal(Point3(1, -1, 0), result.at<Point3>(3), 1e-4));
+}
+
+TEST(TranslationRecovery, ThreePosesWithOneHardConstraint) {
+  // Create a dataset with 3 poses.
+  //   __   __
+  //   \/   \/
+  //   0 _____ 1
+  //    \ __ /
+  //     \\//
+  //      3
+  //
+  // 0 and 1 face in the same direction but have a translation offset. 3 is in
+  // the same direction as 0 and 1, in between 0 and 1, with some Y axis offset.
+
+  Values poses;
+  poses.insert<Pose3>(0, Pose3(Rot3(), Point3(0, 0, 0)));
+  poses.insert<Pose3>(1, Pose3(Rot3(), Point3(2, 0, 0)));
+  poses.insert<Pose3>(3, Pose3(Rot3(), Point3(1, -1, 0)));
+
+  auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
+      poses, {{0, 1}, {0, 3}, {1, 3}});
+
+  std::vector<BinaryMeasurement<Point3>> betweenTranslations;
+  betweenTranslations.emplace_back(0, 1, Point3(2, 0, 0),
+                                   noiseModel::Constrained::All(3, 1e2));
+
+  TranslationRecovery algorithm;
+  auto result =
+      algorithm.run(relativeTranslations, /*scale=*/0.0, betweenTranslations);
+
+  // Check result
+  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0), 1e-4));
+  EXPECT(assert_equal(Point3(2, 0, 0), result.at<Point3>(1), 1e-4));
+  EXPECT(assert_equal(Point3(1, -1, 0), result.at<Point3>(3), 1e-4));
+}
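
The two new tests differ only in the noise model attached to the between-translation measurement: an isotropic sigma keeps it a soft prior, while a constrained model enforces it (up to the penalty weight mu, 1e2 above). The equivalent models in the Python wrapper, as a sketch:

```python
import gtsam

soft = gtsam.noiseModel.Isotropic.Sigma(3, 1e-2)  # soft: quadratic penalty
hard = gtsam.noiseModel.Constrained.All(3)        # hard: residual driven to zero
```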
 /* ************************************************************************* */
 int main() {
   TestResult tr;