Merge branch 'develop' into feature/python-install
commit db40cd71fc
@@ -0,0 +1,56 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

FrobeniusFactor unit tests.
Author: Frank Dellaert
"""
# pylint: disable=no-name-in-module, import-error, invalid-name
import unittest

import numpy as np
from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
                   FrobeniusWormholeFactor, SOn)

id = SO4()
v1 = np.array([0, 0, 0, 0.1, 0, 0])
Q1 = SO4.Expmap(v1)
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
Q2 = SO4.Expmap(v2)


class TestFrobeniusFactorSO4(unittest.TestCase):
    """Test FrobeniusFactor factors."""

    def test_frobenius_factor(self):
        """Test creation of a factor that calculates the Frobenius norm."""
        factor = FrobeniusFactorSO4(1, 2)
        actual = factor.evaluateError(Q1, Q2)
        expected = (Q2.matrix() - Q1.matrix()).transpose().reshape((16,))
        np.testing.assert_array_equal(actual, expected)

    def test_frobenius_between_factor(self):
        """Test creation of a Frobenius BetweenFactor."""
        factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2))
        actual = factor.evaluateError(Q1, Q2)
        expected = np.zeros((16,))
        np.testing.assert_array_almost_equal(actual, expected)

    def test_frobenius_wormhole_factor(self):
        """Test creation of a factor that calculates Shonan error."""
        R1 = SO3.Expmap(v1[3:])
        R2 = SO3.Expmap(v2[3:])
        factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
        I4 = SOn(4)
        Q1 = I4.retract(v1)
        Q2 = I4.retract(v2)
        actual = factor.evaluateError(Q1, Q2)
        expected = np.zeros((12,))
        np.testing.assert_array_almost_equal(actual, expected, decimal=4)


if __name__ == "__main__":
    unittest.main()
@@ -4,6 +4,12 @@ Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name

import sys
if sys.version_info.major >= 3:
    from io import StringIO
else:
    from cStringIO import StringIO

import unittest
from datetime import datetime

@@ -37,11 +43,19 @@ class TestOptimizeComet(GtsamTestCase):
        self.optimizer = gtsam.GaussNewtonOptimizer(
            graph, initial, self.params)

        # setup output capture
        self.capturedOutput = StringIO()
        sys.stdout = self.capturedOutput

    def tearDown(self):
        """Reset print capture."""
        sys.stdout = sys.__stdout__

    def test_simple_printing(self):
        """Test with a simple hook."""

        # Provide a hook that just prints
-        def hook(_, error: float):
+        def hook(_, error):
            print(error)

        # Only thing we require from optimizer is an iterate method

@@ -65,7 +79,7 @@ class TestOptimizeComet(GtsamTestCase):
                     + str(time.hour)+":"+str(time.minute)+":"+str(time.second))

        # I want to do some comet thing here
-        def hook(optimizer, error: float):
+        def hook(optimizer, error):
            comet.log_metric("Karcher error",
                             error, optimizer.iterations())

@@ -76,4 +90,4 @@ class TestOptimizeComet(GtsamTestCase):
        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected)

if __name__ == "__main__":
-    unittest.main()
+    unittest.main()
@@ -4,15 +4,11 @@ Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name

from typing import TypeVar

from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
import gtsam

T = TypeVar('T')


-def optimize(optimizer: T, check_convergence, hook):
+def optimize(optimizer, check_convergence, hook):
    """ Given an optimizer and a convergence check, iterate until convergence.
    After each iteration, hook(optimizer, error) is called.
    After the function, use values and errors to get the result.

@@ -36,8 +32,8 @@ def optimize(optimizer: T, check_convergence, hook):
        current_error = new_error


-def gtsam_optimize(optimizer: NonlinearOptimizer,
-                   params: NonlinearOptimizerParams,
+def gtsam_optimize(optimizer,
+                   params,
                    hook):
    """ Given an optimizer and params, iterate until convergence.
    After each iteration, hook(optimizer) is called.
gtsam.h (+29)

@@ -598,6 +598,7 @@ class SOn {
    // Standard Constructors
    SOn(size_t n);
    static gtsam::SOn FromMatrix(Matrix R);
    static gtsam::SOn Lift(size_t n, Matrix R);

    // Testable
    void print(string s) const;

@@ -2835,6 +2836,34 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
  KarcherMeanFactor(const gtsam::KeyVector& keys);
};

#include <gtsam/slam/FrobeniusFactor.h>
gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
    gtsam::noiseModel::Base* model, size_t d);

template<T = {gtsam::SO3, gtsam::SO4}>
virtual class FrobeniusFactor : gtsam::NoiseModelFactor {
  FrobeniusFactor(size_t key1, size_t key2);
  FrobeniusFactor(size_t key1, size_t key2, gtsam::noiseModel::Base* model);

  Vector evaluateError(const T& R1, const T& R2);
};

template<T = {gtsam::SO3, gtsam::SO4}>
virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12);
  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12, gtsam::noiseModel::Base* model);

  Vector evaluateError(const T& R1, const T& R2);
};

virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
                          size_t p);
  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
                          size_t p, gtsam::noiseModel::Base* model);
  Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2);
};

//*************************************************************************
// Navigation
//*************************************************************************
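These wrapper entries mirror the C++ templates added in gtsam/slam/FrobeniusFactor.h further down; the interface tool flattens `template<T = {gtsam::SO3, gtsam::SO4}>` into concrete classes such as FrobeniusFactorSO4, which is what the Python unit test at the top of this commit imports. For orientation, here is a minimal C++ sketch (not part of the diff; keys 1 and 2 are arbitrary) using the same factors directly:

// --- illustrative sketch, not part of this commit ---
#include <gtsam/geometry/SO3.h>
#include <gtsam/slam/FrobeniusFactor.h>

int main() {
  using namespace gtsam;
  const SO3 R1 = SO3::Expmap(Vector3(0.1, 0.0, 0.0));
  const SO3 R2 = SO3::Expmap(Vector3(0.01, 0.02, 0.03));

  // Plain Frobenius difference of two rotation matrices (9-dimensional error).
  FrobeniusFactor<SO3> factor(1, 2);
  const Vector e1 = factor.evaluateError(R1, R2);  // == vec(R2) - vec(R1)

  // Frobenius flavor of a BetweenFactor: zero error at the measured relative rotation.
  FrobeniusBetweenFactor<SO3> between(1, 2, R1.between(R2));
  const Vector e2 = between.evaluateError(R1, R2);  // ~ zero
  return 0;
}
// --- end sketch ---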
@@ -28,6 +28,7 @@
#include <cstdint>

#include <exception>
#include <string>

#ifdef GTSAM_USE_TBB
#include <tbb/scalable_allocator.h>

@@ -54,7 +55,7 @@
namespace gtsam {

/// Function to demangle type name of variable, e.g. demangle(typeid(x).name())
-std::string demangle(const char* name);
+std::string GTSAM_EXPORT demangle(const char* name);

/// Integer nonlinear key type
typedef std::uint64_t Key;
@@ -169,6 +169,12 @@ class ExecutionTrace {
      content.ptr->reverseAD2(dTdA, jacobians);
  }

+  ~ExecutionTrace() {
+    if (kind == Function) {
+      content.ptr->~CallRecord<Dim>();
+    }
+  }

  /// Define type so we can apply it as a meta-function
  typedef ExecutionTrace<T> type;
};
@@ -16,6 +16,7 @@
 * @brief unit tests for Expression internals
 */

#include <gtsam/nonlinear/internal/CallRecord.h>
#include <gtsam/nonlinear/internal/ExecutionTrace.h>
#include <gtsam/geometry/Point2.h>

@@ -0,0 +1,84 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#pragma once

/**
 * @file TranslationFactor.h
 * @author Frank Dellaert
 * @date March 2020
 * @brief Binary factor for a relative translation direction measurement.
 */

#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

namespace gtsam {

/**
 * Binary factor for a relative translation direction measurement
 * w_aZb. The measurement equation is
 *     w_aZb = Unit3(Tb - Ta)
 * i.e., w_aZb is the translation direction from frame A to B, in world
 * coordinates. Although Unit3 instances live on a manifold, following
 * Wilson14eccv_1DSfM.pdf we compute the error as the *chordal distance* in the
 * ambient world coordinate frame:
 *     normalized(Tb - Ta) - w_aZb.point3()
 *
 * @addtogroup SFM
 */
class TranslationFactor : public NoiseModelFactor2<Point3, Point3> {
 private:
  typedef NoiseModelFactor2<Point3, Point3> Base;
  Point3 measured_w_aZb_;

 public:
  /// default constructor
  TranslationFactor() {}

  TranslationFactor(Key a, Key b, const Unit3& w_aZb,
                    const SharedNoiseModel& noiseModel)
      : Base(noiseModel, a, b), measured_w_aZb_(w_aZb.point3()) {}

  /**
   * @brief Calculate error: (normalized(Tb - Ta) - measurement),
   * where Tb and Ta are Point3 translations and the measurement is
   * the Unit3 translation direction from a to b.
   *
   * @param Ta translation for key a
   * @param Tb translation for key b
   * @param H1 optional Jacobian in Ta
   * @param H2 optional Jacobian in Tb
   * @return Vector
   */
  Vector evaluateError(
      const Point3& Ta, const Point3& Tb,
      boost::optional<Matrix&> H1 = boost::none,
      boost::optional<Matrix&> H2 = boost::none) const override {
    const Point3 dir = Tb - Ta;
    Matrix33 H_predicted_dir;
    const Point3 predicted = normalize(dir, H1 || H2 ? &H_predicted_dir : nullptr);
    if (H1) *H1 = -H_predicted_dir;
    if (H2) *H2 = H_predicted_dir;
    return predicted - measured_w_aZb_;
  }

 private:
  friend class boost::serialization::access;
  template <class ARCHIVE>
  void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
    ar& boost::serialization::make_nvp(
        "Base", boost::serialization::base_object<Base>(*this));
  }
};  // \ TranslationFactor
}  // namespace gtsam
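TranslationFactor is new in this PR; the following minimal sketch (not part of the diff, with arbitrary keys and sigma) spells out the chordal-error convention the header documents, i.e. the error is normalized(Tb - Ta) minus the measured direction:

// --- illustrative sketch, not part of this commit ---
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/sfm/TranslationFactor.h>
#include <iostream>

int main() {
  using namespace gtsam;
  // Measured direction from a to b, expressed in world coordinates.
  const Unit3 w_aZb(1, 0, 0);
  const auto noise = noiseModel::Isotropic::Sigma(3, 0.05);
  TranslationFactor factor(1, 2, w_aZb, noise);

  // Translations whose true direction (0,1,0) disagrees with the measurement (1,0,0):
  const Point3 Ta(0, 0, 0), Tb(0, 2, 0);
  // error = normalized(Tb - Ta) - w_aZb.point3() = (0,1,0) - (1,0,0) = (-1, 1, 0)
  std::cout << factor.evaluateError(Ta, Tb).transpose() << std::endl;
  return 0;
}
// --- end sketch ---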
@@ -0,0 +1,103 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file TranslationRecovery.cpp
 * @author Frank Dellaert
 * @date March 2020
 * @brief Source code for recovering translations when rotations are given.
 */

#include <gtsam/sfm/TranslationRecovery.h>

#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Unit3.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/TranslationFactor.h>
#include <gtsam/slam/PriorFactor.h>

using namespace gtsam;
using namespace std;

NonlinearFactorGraph TranslationRecovery::buildGraph() const {
  auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01);
  NonlinearFactorGraph graph;

  // Add all relative translation edges
  for (auto edge : relativeTranslations_) {
    Key a, b;
    tie(a, b) = edge.first;
    const Unit3 w_aZb = edge.second;
    graph.emplace_shared<TranslationFactor>(a, b, w_aZb, noiseModel);
  }

  return graph;
}

void TranslationRecovery::addPrior(const double scale,
                                   NonlinearFactorGraph* graph) const {
  auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01);
  auto edge = relativeTranslations_.begin();
  Key a, b;
  tie(a, b) = edge->first;
  const Unit3 w_aZb = edge->second;
  graph->emplace_shared<PriorFactor<Point3> >(a, Point3(0, 0, 0), noiseModel);
  graph->emplace_shared<PriorFactor<Point3> >(b, scale * w_aZb.point3(),
                                              noiseModel);
}

Values TranslationRecovery::initializeRandomly() const {
  // Create a lambda expression that checks whether a value exists and randomly
  // initializes it if not.
  Values initial;
  auto insert = [&initial](Key j) {
    if (!initial.exists(j)) {
      initial.insert<Point3>(j, Vector3::Random());
    }
  };

  // Loop over measurements and add a random translation
  for (auto edge : relativeTranslations_) {
    Key a, b;
    tie(a, b) = edge.first;
    insert(a);
    insert(b);
  }
  return initial;
}

Values TranslationRecovery::run(const double scale) const {
  auto graph = buildGraph();
  addPrior(scale, &graph);
  const Values initial = initializeRandomly();
  LevenbergMarquardtOptimizer lm(graph, initial, params_);
  Values result = lm.optimize();
  return result;
}

TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements(
    const Values& poses, const vector<KeyPair>& edges) {
  TranslationEdges relativeTranslations;
  for (auto edge : edges) {
    Key a, b;
    tie(a, b) = edge;
    const Pose3 wTa = poses.at<Pose3>(a), wTb = poses.at<Pose3>(b);
    const Point3 Ta = wTa.translation(), Tb = wTb.translation();
    const Unit3 w_aZb(Tb - Ta);
    relativeTranslations[edge] = w_aZb;
  }
  return relativeTranslations;
}
@@ -0,0 +1,114 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file TranslationRecovery.h
 * @author Frank Dellaert
 * @date March 2020
 * @brief Recover translations when rotations are given.
 */

#include <gtsam/geometry/Unit3.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/Values.h>

#include <map>
#include <utility>

namespace gtsam {

// Set up an optimization problem for the unknown translations Ti in the world
// coordinate frame, given the known camera attitudes wRi with respect to the
// world frame, and a set of (noisy) translation directions of type Unit3,
// w_aZb. The measurement equation is
//    w_aZb = Unit3(Tb - Ta)   (1)
// i.e., w_aZb is the translation direction from frame A to B, in world
// coordinates. Although Unit3 instances live on a manifold, following
// Wilson14eccv_1DSfM.pdf we compute the error as the *chordal distance* in the
// ambient world coordinate frame.
//
// It is clear that we cannot recover the scale, nor the absolute position,
// so the gauge freedom in this case is 3 + 1 = 4. We fix it by fixing the
// translations Ta and Tb associated with the first measurement w_aZb,
// clamping them to their initial values as given to this method. If no initial
// values are given, we use the origin for Ta and set Tb to make (1) come
// through, i.e.,
//    Tb = s * wRa * Point3(w_aZb)   (2)
// where s is an arbitrary scale that can be supplied, default 1.0. Hence, two
// versions are supplied below corresponding to whether we have initial values
// or not.
class TranslationRecovery {
 public:
  using KeyPair = std::pair<Key, Key>;
  using TranslationEdges = std::map<KeyPair, Unit3>;

 private:
  TranslationEdges relativeTranslations_;
  LevenbergMarquardtParams params_;

 public:
  /**
   * @brief Construct a new TranslationRecovery object
   *
   * @param relativeTranslations the relative translations, in world coordinate
   * frames, indexed in a map by a pair of Pose keys.
   * @param lmParams (optional) gtsam::LevenbergMarquardtParams that can be
   * used to modify the parameters for the LM optimizer. By default, uses the
   * default LM parameters.
   */
  TranslationRecovery(const TranslationEdges& relativeTranslations,
                      const LevenbergMarquardtParams& lmParams = LevenbergMarquardtParams())
      : relativeTranslations_(relativeTranslations), params_(lmParams) {
    params_.setVerbosityLM("Summary");
  }

  /**
   * @brief Build the factor graph to do the optimization.
   *
   * @return NonlinearFactorGraph
   */
  NonlinearFactorGraph buildGraph() const;

  /**
   * @brief Add priors on the endpoints of the first measurement edge.
   *
   * @param scale scale for first relative translation which fixes gauge.
   * @param graph factor graph to which prior is added.
   */
  void addPrior(const double scale, NonlinearFactorGraph* graph) const;

  /**
   * @brief Create random initial translations.
   *
   * @return Values
   */
  Values initializeRandomly() const;

  /**
   * @brief Build and optimize factor graph.
   *
   * @param scale scale for first relative translation which fixes gauge.
   * @return Values
   */
  Values run(const double scale = 1.0) const;

  /**
   * @brief Simulate translation direction measurements
   *
   * @param poses SE(3) ground truth poses stored as Values
   * @param edges pairs (a,b) for which a measurement w_aZb will be generated.
   * @return TranslationEdges map from a KeyPair to the simulated Unit3
   * translation direction measurement between the cameras in KeyPair.
   */
  static TranslationEdges SimulateMeasurements(
      const Values& poses, const std::vector<KeyPair>& edges);
};
}  // namespace gtsam
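To make the intended call pattern concrete, here is a hypothetical usage sketch (not part of the diff): three translations on an L-shape, observed only through pairwise world-frame directions. Keys 0, 1, 2 and the geometry are made up for illustration.

// --- illustrative sketch, not part of this commit ---
#include <gtsam/geometry/Unit3.h>
#include <gtsam/sfm/TranslationRecovery.h>

int main() {
  using namespace gtsam;
  // Ground truth used to fabricate the directions: T0=(0,0,0), T1=(1,0,0), T2=(1,1,0).
  TranslationRecovery::TranslationEdges relativeTranslations;
  relativeTranslations[{0, 1}] = Unit3(1, 0, 0);  // direction from 0 towards 1
  relativeTranslations[{0, 2}] = Unit3(1, 1, 0);  // Unit3 normalizes the direction
  relativeTranslations[{1, 2}] = Unit3(0, 1, 0);

  TranslationRecovery algorithm(relativeTranslations);
  // Scale 1.0 clamps T0 to the origin and T1 to the measured direction (see addPrior),
  // so the result should reproduce the ground truth up to optimizer tolerance.
  const Values result = algorithm.run(1.0);
  result.print("recovered translations:\n");
  return 0;
}
// --- end sketch ---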
@@ -0,0 +1,108 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testTranslationFactor.cpp
 * @brief Unit tests for TranslationFactor Class
 * @author Frank Dellaert
 * @date March 2020
 */

#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/sfm/TranslationFactor.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;

// Create a noise model for the chordal error
static SharedNoiseModel model(noiseModel::Isotropic::Sigma(3, 0.05));

// Keys are deliberately *not* in sorted order to test that case.
static const Key kKey1(2), kKey2(1);
static const Unit3 kMeasured(1, 0, 0);

/* ************************************************************************* */
TEST(TranslationFactor, Constructor) {
  TranslationFactor factor(kKey1, kKey2, kMeasured, model);
}

/* ************************************************************************* */
TEST(TranslationFactor, ZeroError) {
  // Create a factor
  TranslationFactor factor(kKey1, kKey2, kMeasured, model);

  // Set the linearization point
  Point3 T1(1, 0, 0), T2(2, 0, 0);

  // Use the factor to calculate the error
  Vector actualError(factor.evaluateError(T1, T2));

  // Verify we get the expected error
  Vector expected = (Vector3() << 0, 0, 0).finished();
  EXPECT(assert_equal(expected, actualError, 1e-9));
}

/* ************************************************************************* */
TEST(TranslationFactor, NonZeroError) {
  // Create a factor
  TranslationFactor factor(kKey1, kKey2, kMeasured, model);

  // Set the linearization point
  Point3 T1(0, 1, 1), T2(0, 2, 2);

  // Use the factor to calculate the error
  Vector actualError(factor.evaluateError(T1, T2));

  // Verify we get the expected error
  Vector expected = (Vector3() << -1, 1 / sqrt(2), 1 / sqrt(2)).finished();
  EXPECT(assert_equal(expected, actualError, 1e-9));
}

/* ************************************************************************* */
Vector factorError(const Point3& T1, const Point3& T2,
                   const TranslationFactor& factor) {
  return factor.evaluateError(T1, T2);
}

TEST(TranslationFactor, Jacobian) {
  // Create a factor
  TranslationFactor factor(kKey1, kKey2, kMeasured, model);

  // Set the linearization point
  Point3 T1(1, 0, 0), T2(2, 0, 0);

  // Use the factor to calculate the Jacobians
  Matrix H1Actual, H2Actual;
  factor.evaluateError(T1, T2, H1Actual, H2Actual);

  // Use numerical derivatives to calculate the Jacobians
  Matrix H1Expected, H2Expected;
  H1Expected = numericalDerivative11<Vector, Point3>(
      boost::bind(&factorError, _1, T2, factor), T1);
  H2Expected = numericalDerivative11<Vector, Point3>(
      boost::bind(&factorError, T1, _1, factor), T2);

  // Verify the Jacobians are correct
  EXPECT(assert_equal(H1Expected, H1Actual, 1e-9));
  EXPECT(assert_equal(H2Expected, H2Actual, 1e-9));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -0,0 +1,117 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file FrobeniusFactor.cpp
 * @date March 2019
 * @author Frank Dellaert
 * @brief Various factors that minimize some Frobenius norm
 */

#include <gtsam/slam/FrobeniusFactor.h>

#include <gtsam/base/timing.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>

#include <cmath>
#include <iostream>
#include <vector>

using namespace std;

namespace gtsam {

//******************************************************************************
boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel(
    const SharedNoiseModel& model, size_t d, bool defaultToUnit) {
  double sigma = 1.0;
  if (model != nullptr) {
    if (model->dim() != 6) {
      if (!defaultToUnit)
        throw std::runtime_error("Can only convert Pose3 noise models");
    } else {
      auto sigmas = model->sigmas().head(3).eval();
      if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) {
        if (!defaultToUnit)
          throw std::runtime_error("Can only convert isotropic rotation noise");
      } else {
        sigma = sigmas(0);
      }
    }
  }
  return noiseModel::Isotropic::Sigma(d, sigma);
}

//******************************************************************************
FrobeniusWormholeFactor::FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12,
                                                 size_t p,
                                                 const SharedNoiseModel& model)
    : NoiseModelFactor2<SOn, SOn>(ConvertPose3NoiseModel(model, p * 3), j1, j2),
      M_(R12.matrix()),               // 3*3 in all cases
      p_(p),                          // 4 for SO(4)
      pp_(p * p),                     // 16 for SO(4)
      dimension_(SOn::Dimension(p)),  // 6 for SO(4)
      G_(pp_, dimension_)             // 16*6 for SO(4)
{
  // Calculate G matrix of vectorized generators
  Matrix Z = Matrix::Zero(p, p);
  for (size_t j = 0; j < dimension_; j++) {
    const auto X = SOn::Hat(Eigen::VectorXd::Unit(dimension_, j));
    G_.col(j) = Eigen::Map<const Matrix>(X.data(), pp_, 1);
  }
  assert(noiseModel()->dim() == 3 * p_);
}

//******************************************************************************
Vector FrobeniusWormholeFactor::evaluateError(
    const SOn& Q1, const SOn& Q2, boost::optional<Matrix&> H1,
    boost::optional<Matrix&> H2) const {
  gttic(FrobeniusWormholeFactorP_evaluateError);

  const Matrix& M1 = Q1.matrix();
  const Matrix& M2 = Q2.matrix();
  assert(M1.rows() == p_ && M2.rows() == p_);

  const size_t dim = 3 * p_;  // Stiefel manifold dimension
  Vector fQ2(dim), hQ1(dim);

  // Vectorize and extract only d leftmost columns, i.e. vec(M2*P)
  fQ2 << Eigen::Map<const Matrix>(M2.data(), dim, 1);

  // Vectorize M1*P*R12
  const Matrix Q1PR12 = M1.leftCols<3>() * M_;
  hQ1 << Eigen::Map<const Matrix>(Q1PR12.data(), dim, 1);

  // If asked, calculate Jacobian as (M \otimes M1) * G
  if (H1) {
    const size_t p2 = 2 * p_;
    Matrix RPxQ = Matrix::Zero(dim, pp_);
    RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0);
    RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1);
    RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2);
    *H1 = -RPxQ * G_;
  }
  if (H2) {
    const size_t p2 = 2 * p_;
    Matrix PxQ = Matrix::Zero(dim, pp_);
    PxQ.block(0, 0, p_, p_) = M2;
    PxQ.block(p_, p_, p_, p_) = M2;
    PxQ.block(p2, p2, p_, p_) = M2;
    *H2 = PxQ * G_;
  }

  return fQ2 - hQ1;
}

//******************************************************************************

}  // namespace gtsam
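In equations, the error returned by evaluateError above is the vectorized difference the code comments describe, with P selecting the first three columns (the Stiefel projection), and M_i the matrix of Q_i in SO(p):

\[
e(Q_1, Q_2) \;=\; \operatorname{vec}(M_2 P) \;-\; \operatorname{vec}(M_1 P\, R_{12}),
\qquad
P = \begin{bmatrix} I_{3\times 3} \\ 0 \end{bmatrix} \in \mathbb{R}^{p \times 3}.
\]

The error therefore has dimension 3p, matching the 3p-dimensional noise model built in the constructor. The equivalenceToSO3 unit test further down checks that, for p = 4, the top 3x3 block of this error coincides with the FrobeniusBetweenFactor<SO3> error and the extra row vanishes.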
@@ -0,0 +1,145 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file FrobeniusFactor.h
 * @date March 2019
 * @author Frank Dellaert
 * @brief Various factors that minimize some Frobenius norm
 */

#pragma once

#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/SOn.h>
#include <gtsam/nonlinear/NonlinearFactor.h>

namespace gtsam {

/**
 * When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3
 * BetweenFactor noise model into a 9 or 16-dimensional isotropic noise
 * model used to weight the Frobenius norm. If the noise model passed is
 * null we return a Dim-dimensional isotropic noise model with sigma=1.0. If
 * not, we check whether the 3-dimensional noise model on rotations is
 * isotropic. If it is, we extend it to 'Dim' dimensions, otherwise we throw an
 * error. If defaultToUnit == false, an exception is thrown on unexpected input.
 */
boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel(
    const SharedNoiseModel& model, size_t d, bool defaultToUnit = true);

/**
 * FrobeniusPrior calculates the Frobenius norm between a given matrix and an
 * element of SO(3) or SO(4).
 */
template <class Rot>
class FrobeniusPrior : public NoiseModelFactor1<Rot> {
  enum { Dim = Rot::VectorN2::RowsAtCompileTime };
  using MatrixNN = typename Rot::MatrixNN;
  Eigen::Matrix<double, Dim, 1> vecM_;  ///< vectorized matrix to approximate

 public:
  /// Constructor
  FrobeniusPrior(Key j, const MatrixNN& M,
                 const SharedNoiseModel& model = nullptr)
      : NoiseModelFactor1<Rot>(ConvertPose3NoiseModel(model, Dim), j) {
    vecM_ << Eigen::Map<const Matrix>(M.data(), Dim, 1);
  }

  /// Error is just Frobenius norm between Rot element and vectorized matrix M.
  Vector evaluateError(const Rot& R,
                       boost::optional<Matrix&> H = boost::none) const {
    return R.vec(H) - vecM_;  // Jacobian is computed only when needed.
  }
};

/**
 * FrobeniusFactor calculates the Frobenius norm between rotation matrices.
 * The template argument can be any fixed-size SO<N>.
 */
template <class Rot>
class FrobeniusFactor : public NoiseModelFactor2<Rot, Rot> {
  enum { Dim = Rot::VectorN2::RowsAtCompileTime };

 public:
  /// Constructor
  FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr)
      : NoiseModelFactor2<Rot, Rot>(ConvertPose3NoiseModel(model, Dim), j1,
                                    j2) {}

  /// Error is just Frobenius norm between rotation matrices.
  Vector evaluateError(const Rot& R1, const Rot& R2,
                       boost::optional<Matrix&> H1 = boost::none,
                       boost::optional<Matrix&> H2 = boost::none) const {
    Vector error = R2.vec(H2) - R1.vec(H1);
    if (H1) *H1 = -*H1;
    return error;
  }
};

/**
 * FrobeniusBetweenFactor is a BetweenFactor that evaluates the Frobenius norm
 * of the rotation error between measured and predicted (rather than the
 * Logmap of the error). This factor is only defined for fixed-dimension types,
 * and in fact only SO3 and SO4 really work, as we need SO<N>::AdjointMap.
 */
template <class Rot>
class FrobeniusBetweenFactor : public NoiseModelFactor2<Rot, Rot> {
  Rot R12_;  ///< measured rotation between R1 and R2
  Eigen::Matrix<double, Rot::dimension, Rot::dimension>
      R2hat_H_R1_;  ///< fixed derivative of R2hat wrpt R1
  enum { Dim = Rot::VectorN2::RowsAtCompileTime };

 public:
  /// Constructor
  FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12,
                         const SharedNoiseModel& model = nullptr)
      : NoiseModelFactor2<Rot, Rot>(
            ConvertPose3NoiseModel(model, Dim), j1, j2),
        R12_(R12),
        R2hat_H_R1_(R12.inverse().AdjointMap()) {}

  /// Error is Frobenius norm between R1*R12 and R2.
  Vector evaluateError(const Rot& R1, const Rot& R2,
                       boost::optional<Matrix&> H1 = boost::none,
                       boost::optional<Matrix&> H2 = boost::none) const {
    const Rot R2hat = R1.compose(R12_);
    Eigen::Matrix<double, Dim, Rot::dimension> vec_H_R2hat;
    Vector error = R2.vec(H2) - R2hat.vec(H1 ? &vec_H_R2hat : nullptr);
    if (H1) *H1 = -vec_H_R2hat * R2hat_H_R1_;
    return error;
  }
};

/**
 * FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will
 * land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects
 * the SO(p) matrices down to a Stiefel manifold of p*d matrices.
 * TODO(frank): template on D=2 or 3
 */
class FrobeniusWormholeFactor : public NoiseModelFactor2<SOn, SOn> {
  Matrix M_;                   ///< measured rotation between R1 and R2
  size_t p_, pp_, dimension_;  ///< dimensionality constants
  Matrix G_;                   ///< matrix of vectorized generators

 public:
  /// Constructor. Note we convert to 3*p-dimensional noise model.
  FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12, size_t p = 4,
                          const SharedNoiseModel& model = nullptr);

  /// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0]
  /// projects down from SO(p) to the Stiefel manifold of px3 matrices.
  Vector evaluateError(const SOn& Q1, const SOn& Q2,
                       boost::optional<Matrix&> H1 = boost::none,
                       boost::optional<Matrix&> H2 = boost::none) const;
};

}  // namespace gtsam
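To illustrate the conversion rule documented above, here is a hypothetical sketch (not part of the diff; the sigma values are made up): a Pose3 BetweenFactor noise model whose rotation block, i.e. the first three sigmas as read in the .cpp above, is isotropic with sigma 0.1 gets converted into a 16-dimensional isotropic model suitable for an SO(4) Frobenius factor.

// --- illustrative sketch, not part of this commit ---
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/slam/FrobeniusFactor.h>
#include <iostream>

int main() {
  using namespace gtsam;
  Vector6 sigmas;
  sigmas << 0.1, 0.1, 0.1, 0.3, 0.3, 0.3;  // rotation sigmas first, then translation
  auto pose3Model = noiseModel::Diagonal::Sigmas(sigmas);
  auto isotropic = ConvertPose3NoiseModel(pose3Model, 16);
  std::cout << "dim = " << isotropic->dim()        // 16
            << ", sigma = " << isotropic->sigma()  // 0.1
            << std::endl;
  return 0;
}
// --- end sketch ---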
@@ -0,0 +1,244 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testFrobeniusFactor.cpp
 * @date March 2019
 * @author Frank Dellaert
 * @brief Check evaluateError for various Frobenius norm factors
 */

#include <gtsam/base/lieProxies.h>
#include <gtsam/base/testLie.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/SO3.h>
#include <gtsam/geometry/SO4.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/slam/FrobeniusFactor.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;

//******************************************************************************
namespace so3 {
SO3 id;
Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished();
SO3 R1 = SO3::Expmap(v1);
Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished();
SO3 R2 = SO3::Expmap(v2);
SO3 R12 = R1.between(R2);
}  // namespace so3

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusPrior<SO3>(1, R2.matrix());
  Vector actual = factor.evaluateError(R1);
  Vector expected = R1.vec() - R2.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, ClosestTo) {
  // Example top-left of SO(4) matrix not quite on SO(3) manifold
  Matrix3 M;
  M << 0.79067393, 0.6051136, -0.0930814,   //
      0.4155925, -0.64214347, -0.64324489,  //
      -0.44948549, 0.47046326, -0.75917576;

  SO3 expected = SO3::ClosestTo(M);

  // manifold optimization gets same result as SVD solution in ClosestTo
  NonlinearFactorGraph graph;
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, M);

  Values initial;
  initial.insert(1, SO3(I_3x3));
  auto result = GaussNewtonOptimizer(graph, initial).optimize();
  EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-6);
  EXPECT(assert_equal(expected, result.at<SO3>(1), 1e-6));
}

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, ChordalL2mean) {
  // See Hartley13ijcv:
  // Cost function C(R) = \sum FrobeniusPrior(R,R_i)
  // Closed form solution = ClosestTo(C_e), where
  // C_e = \sum R_i !!!!

  // We will test by computing the mean of R1=exp(v1) and R1^T=exp(-v1):
  using namespace ::so3;
  SO3 expected;  // identity
  Matrix3 M = R1.matrix() + R1.matrix().transpose();
  EXPECT(assert_equal(expected, SO3::ClosestTo(M), 1e-6));
  EXPECT(assert_equal(expected, SO3::ChordalMean({R1, R1.inverse()}), 1e-6));

  // manifold optimization gets same result as ChordalMean
  NonlinearFactorGraph graph;
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, R1.matrix());
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, R1.matrix().transpose());

  Values initial;
  initial.insert<SO3>(1, R1.inverse());
  auto result = GaussNewtonOptimizer(graph, initial).optimize();
  EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 0.1);  // Why so loose?
  EXPECT(assert_equal(expected, result.at<SO3>(1), 1e-5));
}

/* ************************************************************************* */
TEST(FrobeniusFactorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusFactor<SO3>(1, 2);
  Vector actual = factor.evaluateError(R1, R2);
  Vector expected = R2.vec() - R1.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  values.insert(2, R2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
// Commented out as SO(n) is not yet supported (and might never be)
// TEST(FrobeniusBetweenFactorSOn, evaluateError) {
//   using namespace ::so3;
//   auto factor =
//       FrobeniusBetweenFactor<SOn>(1, 2, SOn::FromMatrix(R12.matrix()));
//   Vector actual = factor.evaluateError(SOn::FromMatrix(R1.matrix()),
//                                        SOn::FromMatrix(R2.matrix()));
//   Vector expected = Vector9::Zero();
//   EXPECT(assert_equal(expected, actual, 1e-9));

//   Values values;
//   values.insert(1, R1);
//   values.insert(2, R2);
//   EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
// }

/* ************************************************************************* */
TEST(FrobeniusBetweenFactorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusBetweenFactor<SO3>(1, 2, R12);
  Vector actual = factor.evaluateError(R1, R2);
  Vector expected = Vector9::Zero();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  values.insert(2, R2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

//******************************************************************************
namespace so4 {
SO4 id;
Vector6 v1 = (Vector(6) << 0.1, 0, 0, 0, 0, 0).finished();
SO4 Q1 = SO4::Expmap(v1);
Vector6 v2 = (Vector(6) << 0.01, 0.02, 0.03, 0.04, 0.05, 0.06).finished();
SO4 Q2 = SO4::Expmap(v2);
}  // namespace so4

/* ************************************************************************* */
TEST(FrobeniusFactorSO4, evaluateError) {
  using namespace ::so4;
  auto factor = FrobeniusFactor<SO4>(1, 2, noiseModel::Unit::Create(6));
  Vector actual = factor.evaluateError(Q1, Q2);
  Vector expected = Q2.vec() - Q1.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, Q1);
  values.insert(2, Q2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
TEST(FrobeniusBetweenFactorSO4, evaluateError) {
  using namespace ::so4;
  Matrix4 M{I_4x4};
  M.topLeftCorner<3, 3>() = ::so3::R12.matrix();
  auto factor = FrobeniusBetweenFactor<SO4>(1, 2, Q1.between(Q2));
  Matrix H1, H2;
  Vector actual = factor.evaluateError(Q1, Q2, H1, H2);
  Vector expected = SO4::VectorN2::Zero();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, Q1);
  values.insert(2, Q2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

//******************************************************************************
namespace submanifold {
SO4 id;
Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished();
SO3 R1 = SO3::Expmap(v1.tail<3>());
SO4 Q1 = SO4::Expmap(v1);
Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished();
SO3 R2 = SO3::Expmap(v2.tail<3>());
SO4 Q2 = SO4::Expmap(v2);
SO3 R12 = R1.between(R2);
}  // namespace submanifold

/* ************************************************************************* */
TEST(FrobeniusWormholeFactor, evaluateError) {
  auto model = noiseModel::Isotropic::Sigma(6, 1.2);  // dimension = 6 not 16
  for (const size_t p : {5, 4, 3}) {
    Matrix M = Matrix::Identity(p, p);
    M.topLeftCorner(3, 3) = submanifold::R1.matrix();
    SOn Q1(M);
    M.topLeftCorner(3, 3) = submanifold::R2.matrix();
    SOn Q2(M);
    auto factor =
        FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model);
    Matrix H1, H2;
    factor.evaluateError(Q1, Q2, H1, H2);

    // Test derivatives
    Values values;
    values.insert(1, Q1);
    values.insert(2, Q2);
    EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
  }
}

/* ************************************************************************* */
TEST(FrobeniusWormholeFactor, equivalenceToSO3) {
  using namespace ::submanifold;
  auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1));
  auto model = noiseModel::Isotropic::Sigma(6, 1.2);  // wrong dimension
  auto factor3 = FrobeniusBetweenFactor<SO3>(1, 2, R12, model);
  auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model);
  const Matrix3 E3(factor3.evaluateError(R1, R2).data());
  const Matrix43 E4(
      factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data());
  EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9));
  EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -0,0 +1,87 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testTranslationRecovery.cpp
 * @author Frank Dellaert
 * @date March 2020
 * @brief Test recovering translations when rotations are given.
 */

#include <gtsam/sfm/TranslationRecovery.h>

#include <CppUnitLite/TestHarness.h>
#include <gtsam/slam/dataset.h>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */
// We read the BAL file, which has 3 cameras in it, with poses. We then assume
// the rotations are correct, but translations have to be estimated from
// translation directions only. Since we have 3 cameras, A, B, and C, we can at
// most create three relative measurements, let's call them w_aZb, w_aZc, and
// w_bZc. These will be of type Unit3. We then construct a TranslationRecovery
// object and call run(), which sets up an optimization problem for the three
// unknown translations.
TEST(TranslationRecovery, BAL) {
  const string filename = findExampleDataFile("dubrovnik-3-7-pre");
  SfmData db;
  bool success = readBAL(filename, db);
  if (!success) throw runtime_error("Could not access file!");

  // Get camera poses, as Values
  size_t j = 0;
  Values poses;
  for (auto camera : db.cameras) {
    poses.insert(j++, camera.pose());
  }

  // Simulate measurements
  const auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
      poses, {{0, 1}, {0, 2}, {1, 2}});

  // Check
  const Pose3 wTa = poses.at<Pose3>(0), wTb = poses.at<Pose3>(1),
              wTc = poses.at<Pose3>(2);
  const Point3 Ta = wTa.translation(), Tb = wTb.translation(),
               Tc = wTc.translation();
  const Rot3 aRw = wTa.rotation().inverse();
  const Unit3 w_aZb = relativeTranslations.at({0, 1});
  EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb));
  const Unit3 w_aZc = relativeTranslations.at({0, 2});
  EXPECT(assert_equal(Unit3(Tc - Ta), w_aZc));

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  EXPECT_LONGS_EQUAL(3, graph.size());

  // Translation recovery, version 1
  const double scale = 2.0;
  const auto result = algorithm.run(scale);

  // Check result for first two translations, determined by prior
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
  EXPECT(assert_equal(Point3(2 * w_aZb.point3()), result.at<Point3>(1)));

  // Check that the third translation is correct
  Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm());
  EXPECT(assert_equal(expected, result.at<Point3>(2), 1e-4));

  // TODO(frank): how to get stats back?
  // EXPECT_DOUBLES_EQUAL(0.0199833, actualError, 1e-5);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -0,0 +1,110 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file timeFrobeniusFactor.cpp
 * @brief Time FrobeniusFactor on a g2o pose-graph dataset
 * @author Frank Dellaert
 * @date June 6, 2015
 */

#include <gtsam/base/timing.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/SO4.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/FrobeniusFactor.h>

#include <iostream>
#include <string>
#include <vector>

using namespace std;
using namespace gtsam;

static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2);

int main(int argc, char* argv[]) {
  // primitive argument parsing:
  if (argc > 3) {
    throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]");
  }

  string g2oFile;
  try {
    if (argc > 1)
      g2oFile = argv[argc - 1];
    else
      g2oFile =
          "/Users/dellaert/git/2019c-notes-shonanrotationaveraging/matlabCode/"
          "datasets/randomTorus3D.g2o";
    // g2oFile = findExampleDataFile("sphere_smallnoise.graph");
  } catch (const exception& e) {
    cerr << e.what() << '\n';
    exit(1);
  }

  // Read G2O file
  const auto factors = parse3DFactors(g2oFile);
  const auto poses = parse3DPoses(g2oFile);

  // Build graph
  NonlinearFactorGraph graph;
  // graph.add(NonlinearEquality<SO4>(0, SO4()));
  auto priorModel = noiseModel::Isotropic::Sigma(6, 10000);
  graph.add(PriorFactor<SO4>(0, SO4(), priorModel));
  for (const auto& factor : factors) {
    const auto& keys = factor->keys();
    const auto& Tij = factor->measured();
    const auto& model = factor->noiseModel();
    // p = 4 to match the SO4 initial values used below
    graph.emplace_shared<FrobeniusWormholeFactor>(
        keys[0], keys[1], Rot3(Tij.rotation().matrix()), 4, model);
  }

  boost::mt19937 rng(42);

  // Set parameters to be similar to ceres
  LevenbergMarquardtParams params;
  LevenbergMarquardtParams::SetCeresDefaults(&params);
  params.setLinearSolverType("MULTIFRONTAL_QR");
  // params.setVerbosityLM("SUMMARY");
  // params.linearSolverType = LevenbergMarquardtParams::Iterative;
  // auto pcg = boost::make_shared<PCGSolverParameters>();
  // pcg->preconditioner_ =
  //     boost::make_shared<SubgraphPreconditionerParameters>();
  //     boost::make_shared<BlockJacobiPreconditionerParameters>();
  // params.iterativeParams = pcg;

  // Optimize
  for (size_t i = 0; i < 100; i++) {
    gttic_(optimize);
    Values initial;
    initial.insert(0, SO4());
    for (size_t j = 1; j < poses.size(); j++) {
      initial.insert(j, SO4::Random(rng));
    }
    LevenbergMarquardtOptimizer lm(graph, initial, params);
    Values result = lm.optimize();
    cout << "cost = " << graph.error(result) << endl;
  }

  tictoc_finishedIteration_();
  tictoc_print_();

  return 0;
}