Merge remote-tracking branch 'origin/develop' into feature/wrap_cg
commit 72ce0c088a
@@ -14,7 +14,8 @@ addons:
      - clang-9
      - build-essential pkg-config
      - cmake
-      - libpython-dev python-numpy
+      - python3-dev libpython-dev
+      - python3-numpy
      - libboost-all-dev

# before_install:
@@ -537,54 +537,54 @@ endif()

print_build_options_for_target(gtsam)

message(STATUS " Use System Eigen : ${GTSAM_USE_SYSTEM_EIGEN} (Using version: ${GTSAM_EIGEN_VERSION})")

if(GTSAM_USE_TBB)
  message(STATUS " Use Intel TBB : Yes")
elseif(TBB_FOUND)
  message(STATUS " Use Intel TBB : TBB found but GTSAM_WITH_TBB is disabled")
else()
  message(STATUS " Use Intel TBB : TBB not found")
endif()
if(GTSAM_USE_EIGEN_MKL)
  message(STATUS " Eigen will use MKL : Yes")
elseif(MKL_FOUND)
  message(STATUS " Eigen will use MKL : MKL found but GTSAM_WITH_EIGEN_MKL is disabled")
else()
  message(STATUS " Eigen will use MKL : MKL not found")
endif()
if(GTSAM_USE_EIGEN_MKL_OPENMP)
  message(STATUS " Eigen will use MKL and OpenMP : Yes")
elseif(OPENMP_FOUND AND NOT GTSAM_WITH_EIGEN_MKL)
  message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL is disabled")
elseif(OPENMP_FOUND AND NOT MKL_FOUND)
  message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but MKL not found")
elseif(OPENMP_FOUND)
  message(STATUS " Eigen will use MKL and OpenMP : OpenMP found but GTSAM_WITH_EIGEN_MKL_OPENMP is disabled")
else()
  message(STATUS " Eigen will use MKL and OpenMP : OpenMP not found")
endif()
message(STATUS " Default allocator : ${GTSAM_DEFAULT_ALLOCATOR}")

if(GTSAM_THROW_CHEIRALITY_EXCEPTION)
  message(STATUS " Cheirality exceptions enabled : YES")
else()
  message(STATUS " Cheirality exceptions enabled : NO")
endif()

if(NOT MSVC AND NOT XCODE_VERSION)
  if(CCACHE_FOUND AND GTSAM_BUILD_WITH_CCACHE)
    message(STATUS " Build with ccache : Yes")
  elseif(CCACHE_FOUND)
    message(STATUS " Build with ccache : ccache found but GTSAM_BUILD_WITH_CCACHE is disabled")
  else()
    message(STATUS " Build with ccache : No")
  endif()
endif()

message(STATUS "Packaging flags ")
message(STATUS " CPack Source Generator : ${CPACK_SOURCE_GENERATOR}")
message(STATUS " CPack Generator : ${CPACK_GENERATOR}")

message(STATUS "GTSAM flags ")
print_config_flag(${GTSAM_USE_QUATERNIONS} "Quaternions as default Rot3 ")

@@ -595,13 +595,17 @@ print_config_flag(${GTSAM_ALLOW_DEPRECATED_SINCE_V4} "Deprecated in GTSAM 4 al
print_config_flag(${GTSAM_TYPEDEF_POINTS_TO_VECTORS} "Point3 is typedef to Vector3 ")
print_config_flag(${GTSAM_SUPPORT_NESTED_DISSECTION} "Metis-based Nested Dissection ")
print_config_flag(${GTSAM_TANGENT_PREINTEGRATION} "Use tangent-space preintegration")
print_config_flag(${GTSAM_BUILD_WRAP} "Build Wrap ")

message(STATUS "MATLAB toolbox flags ")
-print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install matlab toolbox ")
+print_config_flag(${GTSAM_INSTALL_MATLAB_TOOLBOX} "Install MATLAB toolbox ")
if (${GTSAM_INSTALL_MATLAB_TOOLBOX})
  message(STATUS " MATLAB root : ${MATLAB_ROOT}")
  message(STATUS " MEX binary : ${MEX_COMMAND}")
endif()

message(STATUS "Cython toolbox flags ")
print_config_flag(${GTSAM_INSTALL_CYTHON_TOOLBOX} "Install Cython toolbox ")
if(GTSAM_INSTALL_CYTHON_TOOLBOX)
  message(STATUS " Python version : ${GTSAM_PYTHON_VERSION}")
endif()

@@ -0,0 +1,56 @@
"""
GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved

See LICENSE for the license information

FrobeniusFactor unit tests.
Author: Frank Dellaert
"""
# pylint: disable=no-name-in-module, import-error, invalid-name
import unittest

import numpy as np
from gtsam import (Rot3, SO3, SO4, FrobeniusBetweenFactorSO4, FrobeniusFactorSO4,
                   FrobeniusWormholeFactor, SOn)

id = SO4()
v1 = np.array([0, 0, 0, 0.1, 0, 0])
Q1 = SO4.Expmap(v1)
v2 = np.array([0, 0, 0, 0.01, 0.02, 0.03])
Q2 = SO4.Expmap(v2)


class TestFrobeniusFactorSO4(unittest.TestCase):
    """Test FrobeniusFactor factors."""

    def test_frobenius_factor(self):
        """Test creation of a factor that calculates the Frobenius norm."""
        factor = FrobeniusFactorSO4(1, 2)
        actual = factor.evaluateError(Q1, Q2)
        expected = (Q2.matrix()-Q1.matrix()).transpose().reshape((16,))
        np.testing.assert_array_equal(actual, expected)

    def test_frobenius_between_factor(self):
        """Test creation of a Frobenius BetweenFactor."""
        factor = FrobeniusBetweenFactorSO4(1, 2, Q1.between(Q2))
        actual = factor.evaluateError(Q1, Q2)
        expected = np.zeros((16,))
        np.testing.assert_array_almost_equal(actual, expected)

    def test_frobenius_wormhole_factor(self):
        """Test creation of a factor that calculates Shonan error."""
        R1 = SO3.Expmap(v1[3:])
        R2 = SO3.Expmap(v2[3:])
        factor = FrobeniusWormholeFactor(1, 2, Rot3(R1.between(R2).matrix()), p=4)
        I4 = SOn(4)
        Q1 = I4.retract(v1)
        Q2 = I4.retract(v2)
        actual = factor.evaluateError(Q1, Q2)
        expected = np.zeros((12,))
        np.testing.assert_array_almost_equal(actual, expected, decimal=4)


if __name__ == "__main__":
    unittest.main()

@@ -4,6 +4,12 @@ Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name

import sys
if sys.version_info.major >= 3:
    from io import StringIO
else:
    from cStringIO import StringIO

import unittest
from datetime import datetime

@@ -37,11 +43,19 @@ class TestOptimizeComet(GtsamTestCase):
        self.optimizer = gtsam.GaussNewtonOptimizer(
            graph, initial, self.params)

        # setup output capture
        self.capturedOutput = StringIO()
        sys.stdout = self.capturedOutput

    def tearDown(self):
        """Reset print capture."""
        sys.stdout = sys.__stdout__

    def test_simple_printing(self):
        """Test with a simple hook."""

        # Provide a hook that just prints
-       def hook(_, error: float):
+       def hook(_, error):
            print(error)

        # Only thing we require from optimizer is an iterate method

@@ -65,7 +79,7 @@ class TestOptimizeComet(GtsamTestCase):
              + str(time.hour)+":"+str(time.minute)+":"+str(time.second))

        # I want to do some comet thing here
-       def hook(optimizer, error: float):
+       def hook(optimizer, error):
            comet.log_metric("Karcher error",
                             error, optimizer.iterations())

@@ -76,4 +90,4 @@ class TestOptimizeComet(GtsamTestCase):
        self.gtsamAssertEquals(actual.atRot3(KEY), self.expected)

if __name__ == "__main__":
    unittest.main()

@@ -4,15 +4,11 @@ Author: Jing Wu and Frank Dellaert
"""
# pylint: disable=invalid-name

from typing import TypeVar

from gtsam import NonlinearOptimizer, NonlinearOptimizerParams
import gtsam

T = TypeVar('T')


-def optimize(optimizer: T, check_convergence, hook):
+def optimize(optimizer, check_convergence, hook):
    """ Given an optimizer and a convergence check, iterate until convergence.
    After each iteration, hook(optimizer, error) is called.
    After the function, use values and errors to get the result.

@@ -36,8 +32,8 @@ def optimize(optimizer, check_convergence, hook):
        current_error = new_error


-def gtsam_optimize(optimizer: NonlinearOptimizer,
-                   params: NonlinearOptimizerParams,
+def gtsam_optimize(optimizer,
+                   params,
                    hook):
    """ Given an optimizer and params, iterate until convergence.
    After each iteration, hook(optimizer) is called.

@@ -1,3 +1,3 @@
Cython>=0.25.2
backports_abc>=0.5
-numpy>=1.12.0
+numpy>=1.11.0

@@ -1,7 +1,4 @@
set (excluded_examples
-  DiscreteBayesNet_FG.cpp
-  UGM_chain.cpp
-  UGM_small.cpp
  elaboratePoint2KalmanFilter.cpp
)

@@ -0,0 +1,6 @@
|
|||
VERTEX_SE3:QUAT 0 -3.865747774038187 0.06639337702667497 -0.16064874691945374 0.024595211709139555 0.49179523413089893 -0.06279232989379242 0.8680954132776109
|
||||
VERTEX_SE3:QUAT 1 -3.614793159814815 0.04774490041587656 -0.2837650367985949 0.00991721787943912 0.4854918961891193 -0.042343290945895576 0.8731588132957809
|
||||
VERTEX_SE3:QUAT 2 -3.255096913553434 0.013296754286114112 -0.5339792269680574 -0.027851108010665374 0.585478168397957 -0.05088341463532465 0.8086102325762403
|
||||
EDGE_SE3:QUAT 0 1 0.2509546142233723 -0.01864847661079841 -0.12311628987914114 -0.022048798853273946 -0.01796327847857683 0.010210006313668573 0.9995433591728293 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||
EDGE_SE3:QUAT 0 2 0.6106508604847534 -0.05309662274056086 -0.3733304800486037 -0.054972994022992064 0.10432547598981769 -0.02221474884651081 0.9927742290779572 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
||||
EDGE_SE3:QUAT 1 2 0.3596962462613811 -0.03444814612976245 -0.25021419016946256 -0.03174661848656213 0.11646825423134777 -0.02951742735854383 0.9922479626852876 100.0 0.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 0.0 100.0 0.0 0.0 0.0 25.0 0.0 0.0 25.0 0.0 25.0
|
|
@@ -0,0 +1,11 @@
|
|||
VERTEX_SE3:QUAT 0 0 0 0 0 0 0 1
|
||||
VERTEX_SE3:QUAT 1 0 0 0 0 0 0 1
|
||||
VERTEX_SE3:QUAT 2 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||
VERTEX_SE3:QUAT 3 0 0 0 -0.00499994 -0.00499994 -0.00499994 0.999963
|
||||
VERTEX_SE3:QUAT 4 0 0 0 0.00499994 0.00499994 0.00499994 0.999963
|
||||
EDGE_SE3:QUAT 1 2 1 2 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||
EDGE_SE3:QUAT 2 3 -3.26795e-07 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||
EDGE_SE3:QUAT 3 4 1 1 0 0 0 0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||
EDGE_SE3:QUAT 3 1 6.9282e-07 2 0 0 0 1 1.73205e-07 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||
EDGE_SE3:QUAT 1 4 -1 1 0 0 0 -0.707107 0.707107 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
||||
EDGE_SE3:QUAT 0 1 0 0 0 0 0 0 1 100 0 0 0 0 0 100 0 0 0 0 100 0 0 0 100 0 0 100 0 100
|
|
@@ -10,110 +10,111 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file DiscreteBayesNet_FG.cpp
|
||||
* @file DiscreteBayesNet_graph.cpp
|
||||
* @brief Discrete Bayes Net example using Factor Graphs
|
||||
* @author Abhijit
|
||||
* @date Jun 4, 2012
|
||||
*
|
||||
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009, p529]
|
||||
* You may be familiar with other graphical model packages like BNT (available
|
||||
* at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this is used as an
|
||||
* example. The following demo is same as that in the above link, except that
|
||||
* everything is using GTSAM.
|
||||
* We use the famous Rain/Cloudy/Sprinkler Example of [Russell & Norvig, 2009,
|
||||
* p529] You may be familiar with other graphical model packages like BNT
|
||||
* (available at http://bnt.googlecode.com/svn/trunk/docs/usage.html) where this
|
||||
* is used as an example. The following demo is same as that in the above link,
|
||||
* except that everything is using GTSAM.
|
||||
*/
|
||||
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
#include <gtsam/discrete/DiscreteSequentialSolver.h>
|
||||
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||
|
||||
#include <iomanip>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
// Define keys and a print function
|
||||
Key C(1), S(2), R(3), W(4);
|
||||
auto print = [=](DiscreteFactor::sharedValues values) {
|
||||
cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
|
||||
<< " Sprinkler = " << static_cast<bool>((*values)[S])
|
||||
<< " Rain = " << boolalpha << static_cast<bool>((*values)[R])
|
||||
<< " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
|
||||
};
|
||||
|
||||
// We assume binary state variables
|
||||
// we have 0 == "False" and 1 == "True"
|
||||
const size_t nrStates = 2;
|
||||
|
||||
// define variables
|
||||
DiscreteKey Cloudy(1, nrStates), Sprinkler(2, nrStates), Rain(3, nrStates),
|
||||
WetGrass(4, nrStates);
|
||||
DiscreteKey Cloudy(C, nrStates), Sprinkler(S, nrStates), Rain(R, nrStates),
|
||||
WetGrass(W, nrStates);
|
||||
|
||||
// create Factor Graph of the bayes net
|
||||
DiscreteFactorGraph graph;
|
||||
|
||||
// add factors
|
||||
graph.add(Cloudy, "0.5 0.5"); //P(Cloudy)
|
||||
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); //P(Sprinkler | Cloudy)
|
||||
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); //P(Rain | Cloudy)
|
||||
graph.add(Cloudy, "0.5 0.5"); // P(Cloudy)
|
||||
graph.add(Cloudy & Sprinkler, "0.5 0.5 0.9 0.1"); // P(Sprinkler | Cloudy)
|
||||
graph.add(Cloudy & Rain, "0.8 0.2 0.2 0.8"); // P(Rain | Cloudy)
|
||||
graph.add(Sprinkler & Rain & WetGrass,
|
||||
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); //P(WetGrass | Sprinkler, Rain)
|
||||
"1 0 0.1 0.9 0.1 0.9 0.001 0.99"); // P(WetGrass | Sprinkler, Rain)
|
||||
|
||||
// Alternatively we can also create a DiscreteBayesNet, add DiscreteConditional
|
||||
// factors and create a FactorGraph from it. (See testDiscreteBayesNet.cpp)
|
||||
// Alternatively we can also create a DiscreteBayesNet, add
|
||||
// DiscreteConditional factors and create a FactorGraph from it. (See
|
||||
// testDiscreteBayesNet.cpp)
|
||||
|
||||
// Since this is a relatively small distribution, we can as well print
|
||||
// the whole distribution..
|
||||
cout << "Distribution of Example: " << endl;
|
||||
cout << setw(11) << "Cloudy(C)" << setw(14) << "Sprinkler(S)" << setw(10)
|
||||
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
||||
<< endl;
|
||||
<< "Rain(R)" << setw(14) << "WetGrass(W)" << setw(15) << "P(C,S,R,W)"
|
||||
<< endl;
|
||||
for (size_t a = 0; a < nrStates; a++)
|
||||
for (size_t m = 0; m < nrStates; m++)
|
||||
for (size_t h = 0; h < nrStates; h++)
|
||||
for (size_t c = 0; c < nrStates; c++) {
|
||||
DiscreteFactor::Values values;
|
||||
values[Cloudy.first] = c;
|
||||
values[Sprinkler.first] = h;
|
||||
values[Rain.first] = m;
|
||||
values[WetGrass.first] = a;
|
||||
values[C] = c;
|
||||
values[S] = h;
|
||||
values[R] = m;
|
||||
values[W] = a;
|
||||
double prodPot = graph(values);
|
||||
cout << boolalpha << setw(8) << (bool) c << setw(14)
|
||||
<< (bool) h << setw(12) << (bool) m << setw(13)
|
||||
<< (bool) a << setw(16) << prodPot << endl;
|
||||
cout << setw(8) << static_cast<bool>(c) << setw(14)
|
||||
<< static_cast<bool>(h) << setw(12) << static_cast<bool>(m)
|
||||
<< setw(13) << static_cast<bool>(a) << setw(16) << prodPot
|
||||
<< endl;
|
||||
}
|
||||
|
||||
|
||||
// "Most Probable Explanation", i.e., configuration with largest value
|
||||
DiscreteSequentialSolver solver(graph);
|
||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
||||
cout <<"\nMost Probable Explanation (MPE):" << endl;
|
||||
cout << boolalpha << "Cloudy = " << (bool)(*optimalDecoding)[Cloudy.first]
|
||||
<< " Sprinkler = " << (bool)(*optimalDecoding)[Sprinkler.first]
|
||||
<< " Rain = " << boolalpha << (bool)(*optimalDecoding)[Rain.first]
|
||||
<< " WetGrass = " << (bool)(*optimalDecoding)[WetGrass.first]<< endl;
|
||||
DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
|
||||
cout << "\nMost Probable Explanation (MPE):" << endl;
|
||||
print(mpe);
|
||||
|
||||
// "Inference" We show an inference query like: probability that the Sprinkler
|
||||
// was on; given that the grass is wet i.e. P( S | C=0) = ?
|
||||
|
||||
// "Inference" We show an inference query like: probability that the Sprinkler was on;
|
||||
// given that the grass is wet i.e. P( S | W=1) =?
|
||||
cout << "\nInference Query: Probability of Sprinkler being on given Grass is Wet" << endl;
|
||||
// add evidence that it is not Cloudy
|
||||
graph.add(Cloudy, "1 0");
|
||||
|
||||
// Method 1: we can compute the joint marginal P(S,W) and from that we can compute
|
||||
// P(S | W=1) = P(S,W=1)/P(W=1) We do this in following three steps..
|
||||
// solve again, now with evidence
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
|
||||
|
||||
//Step1: Compute P(S,W)
|
||||
DiscreteFactorGraph jointFG;
|
||||
jointFG = *solver.jointFactorGraph(DiscreteKeys(Sprinkler & WetGrass).indices());
|
||||
DecisionTreeFactor probSW = jointFG.product();
|
||||
cout << "\nMPE given C=0:" << endl;
|
||||
print(mpe_with_evidence);
|
||||
|
||||
//Step2: Compute P(W)
|
||||
DiscreteFactor::shared_ptr probW = solver.marginalFactor(WetGrass.first);
|
||||
|
||||
//Step3: Computer P(S | W=1) = P(S,W=1)/P(W=1)
|
||||
DiscreteFactor::Values values;
|
||||
values[WetGrass.first] = 1;
|
||||
|
||||
//print P(S=0|W=1)
|
||||
values[Sprinkler.first] = 0;
|
||||
cout << "P(S=0|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
||||
|
||||
//print P(S=1|W=1)
|
||||
values[Sprinkler.first] = 1;
|
||||
cout << "P(S=1|W=1) = " << probSW(values)/(*probW)(values) << endl;
|
||||
|
||||
// TODO: Method 2 : One way is to modify the factor graph to
|
||||
// incorporate the evidence node and compute the marginal
|
||||
// TODO: graph.addEvidence(Cloudy,0);
|
||||
// we can also calculate arbitrary marginals:
|
||||
DiscreteMarginals marginals(graph);
|
||||
cout << "\nP(S=1|C=0):" << marginals.marginalProbabilities(Sprinkler)[1]
|
||||
<< endl;
|
||||
cout << "\nP(R=0|C=0):" << marginals.marginalProbabilities(Rain)[0] << endl;
|
||||
cout << "\nP(W=1|C=0):" << marginals.marginalProbabilities(WetGrass)[1]
|
||||
<< endl;
|
||||
|
||||
// We can also sample from it
|
||||
cout << "\n10 samples:" << endl;
|
||||
for (size_t i = 0; i < 10; i++) {
|
||||
DiscreteFactor::sharedValues sample = chordal->sample();
|
||||
print(sample);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,353 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file IMUKittiExampleGPS
|
||||
* @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
|
||||
* @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
|
||||
*/
|
||||
|
||||
// GTSAM related includes.
|
||||
#include <gtsam/navigation/CombinedImuFactor.h>
|
||||
#include <gtsam/navigation/GPSFactor.h>
|
||||
#include <gtsam/navigation/ImuFactor.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam/slam/BetweenFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
#include <gtsam/nonlinear/ISAM2.h>
|
||||
#include <gtsam/nonlinear/ISAM2Params.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
|
||||
#include <cstring>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
using symbol_shorthand::X; // Pose3 (x,y,z,r,p,y)
|
||||
using symbol_shorthand::V; // Vel (xdot,ydot,zdot)
|
||||
using symbol_shorthand::B; // Bias (ax,ay,az,gx,gy,gz)
|
||||
|
||||
struct KittiCalibration {
|
||||
double body_ptx;
|
||||
double body_pty;
|
||||
double body_ptz;
|
||||
double body_prx;
|
||||
double body_pry;
|
||||
double body_prz;
|
||||
double accelerometer_sigma;
|
||||
double gyroscope_sigma;
|
||||
double integration_sigma;
|
||||
double accelerometer_bias_sigma;
|
||||
double gyroscope_bias_sigma;
|
||||
double average_delta_t;
|
||||
};
|
||||
|
||||
struct ImuMeasurement {
|
||||
double time;
|
||||
double dt;
|
||||
Vector3 accelerometer;
|
||||
Vector3 gyroscope; // omega
|
||||
};
|
||||
|
||||
struct GpsMeasurement {
|
||||
double time;
|
||||
Vector3 position; // x,y,z
|
||||
};
|
||||
|
||||
const string output_filename = "IMUKittiExampleGPSResults.csv";
|
||||
|
||||
void loadKittiData(KittiCalibration& kitti_calibration,
|
||||
vector<ImuMeasurement>& imu_measurements,
|
||||
vector<GpsMeasurement>& gps_measurements) {
|
||||
string line;
|
||||
|
||||
// Read IMU metadata and compute relative sensor pose transforms
|
||||
// BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma GyroscopeSigma IntegrationSigma
|
||||
// AccelerometerBiasSigma GyroscopeBiasSigma AverageDeltaT
|
||||
string imu_metadata_file = findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
|
||||
ifstream imu_metadata(imu_metadata_file.c_str());
|
||||
|
||||
printf("-- Reading sensor metadata\n");
|
||||
|
||||
getline(imu_metadata, line, '\n'); // ignore the first line
|
||||
|
||||
// Load Kitti calibration
|
||||
getline(imu_metadata, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&kitti_calibration.body_ptx,
|
||||
&kitti_calibration.body_pty,
|
||||
&kitti_calibration.body_ptz,
|
||||
&kitti_calibration.body_prx,
|
||||
&kitti_calibration.body_pry,
|
||||
&kitti_calibration.body_prz,
|
||||
&kitti_calibration.accelerometer_sigma,
|
||||
&kitti_calibration.gyroscope_sigma,
|
||||
&kitti_calibration.integration_sigma,
|
||||
&kitti_calibration.accelerometer_bias_sigma,
|
||||
&kitti_calibration.gyroscope_bias_sigma,
|
||||
&kitti_calibration.average_delta_t);
|
||||
printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
|
||||
kitti_calibration.body_ptx,
|
||||
kitti_calibration.body_pty,
|
||||
kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx,
|
||||
kitti_calibration.body_pry,
|
||||
kitti_calibration.body_prz,
|
||||
kitti_calibration.accelerometer_sigma,
|
||||
kitti_calibration.gyroscope_sigma,
|
||||
kitti_calibration.integration_sigma,
|
||||
kitti_calibration.accelerometer_bias_sigma,
|
||||
kitti_calibration.gyroscope_bias_sigma,
|
||||
kitti_calibration.average_delta_t);
|
||||
|
||||
// Read IMU data
|
||||
// Time dt accelX accelY accelZ omegaX omegaY omegaZ
|
||||
string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
|
||||
printf("-- Reading IMU measurements from file\n");
|
||||
{
|
||||
ifstream imu_data(imu_data_file.c_str());
|
||||
getline(imu_data, line, '\n'); // ignore the first line
|
||||
|
||||
double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0, gyro_y = 0, gyro_z = 0;
|
||||
while (!imu_data.eof()) {
|
||||
getline(imu_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf",
|
||||
&time, &dt,
|
||||
&acc_x, &acc_y, &acc_z,
|
||||
&gyro_x, &gyro_y, &gyro_z);
|
||||
|
||||
ImuMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.dt = dt;
|
||||
measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
|
||||
measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
|
||||
imu_measurements.push_back(measurement);
|
||||
}
|
||||
}
|
||||
|
||||
// Read GPS data
|
||||
// Time,X,Y,Z
|
||||
string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
|
||||
printf("-- Reading GPS measurements from file\n");
|
||||
{
|
||||
ifstream gps_data(gps_data_file.c_str());
|
||||
getline(gps_data, line, '\n'); // ignore the first line
|
||||
|
||||
double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
|
||||
while (!gps_data.eof()) {
|
||||
getline(gps_data, line, '\n');
|
||||
sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);
|
||||
|
||||
GpsMeasurement measurement;
|
||||
measurement.time = time;
|
||||
measurement.position = Vector3(gps_x, gps_y, gps_z);
|
||||
gps_measurements.push_back(measurement);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
KittiCalibration kitti_calibration;
|
||||
vector<ImuMeasurement> imu_measurements;
|
||||
vector<GpsMeasurement> gps_measurements;
|
||||
loadKittiData(kitti_calibration, imu_measurements, gps_measurements);
|
||||
|
||||
Vector6 BodyP = (Vector(6) << kitti_calibration.body_ptx, kitti_calibration.body_pty, kitti_calibration.body_ptz,
|
||||
kitti_calibration.body_prx, kitti_calibration.body_pry, kitti_calibration.body_prz)
|
||||
.finished();
|
||||
auto body_T_imu = Pose3::Expmap(BodyP);
|
||||
if (!body_T_imu.equals(Pose3(), 1e-5)) {
|
||||
printf("Currently only support IMUinBody is identity, i.e. IMU and body frame are the same");
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
// Configure different variables
|
||||
double t_offset = gps_measurements[0].time;
|
||||
size_t first_gps_pose = 1;
|
||||
size_t gps_skip = 10; // Skip this many GPS measurements each time
|
||||
double g = 9.8;
|
||||
auto w_coriolis = Vector3(); // zero vector
|
||||
|
||||
// Configure noise models
|
||||
auto noise_model_gps = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0/0.07))
|
||||
.finished());
|
||||
|
||||
// Set initial conditions for the estimated trajectory
|
||||
// initial pose is the reference frame (navigation frame)
|
||||
auto current_pose_global = Pose3(Rot3(), gps_measurements[first_gps_pose].position);
|
||||
auto current_velocity_global = Vector3(); // the vehicle is stationary at the beginning at position 0,0,0
|
||||
auto current_bias = imuBias::ConstantBias(); // init with zero bias
|
||||
|
||||
auto sigma_init_x = noiseModel::Diagonal::Precisions((Vector(6) << Vector3::Constant(0),
|
||||
Vector3::Constant(1.0))
|
||||
.finished());
|
||||
auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
|
||||
auto sigma_init_b = noiseModel::Diagonal::Sigmas((Vector(6) << Vector3::Constant(0.100),
|
||||
Vector3::Constant(5.00e-05))
|
||||
.finished());
|
||||
|
||||
// Set IMU preintegration parameters
|
||||
Matrix33 measured_acc_cov = I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
|
||||
Matrix33 measured_omega_cov = I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
|
||||
// error committed in integrating position from velocities
|
||||
Matrix33 integration_error_cov = I_3x3 * pow(kitti_calibration.integration_sigma, 2);
|
||||
|
||||
auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
|
||||
imu_params->accelerometerCovariance = measured_acc_cov; // acc white noise in continuous
|
||||
imu_params->integrationCovariance = integration_error_cov; // integration uncertainty continuous
|
||||
imu_params->gyroscopeCovariance = measured_omega_cov; // gyro white noise in continuous
|
||||
imu_params->omegaCoriolis = w_coriolis;
|
||||
|
||||
std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement = nullptr;
|
||||
|
||||
// Set ISAM2 parameters and create ISAM2 solver object
|
||||
ISAM2Params isam_params;
|
||||
isam_params.factorization = ISAM2Params::CHOLESKY;
|
||||
isam_params.relinearizeSkip = 10;
|
||||
|
||||
ISAM2 isam(isam_params);
|
||||
|
||||
// Create the factor graph and values object that will store new factors and values to add to the incremental graph
|
||||
NonlinearFactorGraph new_factors;
|
||||
Values new_values; // values storing the initial estimates of new nodes in the factor graph
|
||||
|
||||
/// Main loop:
|
||||
/// (1) we read the measurements
|
||||
/// (2) we create the corresponding factors in the graph
|
||||
/// (3) we solve the graph to obtain an optimal estimate of the robot trajectory
|
||||
printf("-- Starting main loop: inference is performed at each time step, but we plot trajectory every 10 steps\n");
|
||||
size_t j = 0;
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
// At each non-IMU measurement we initialize a new node in the graph
|
||||
auto current_pose_key = X(i);
|
||||
auto current_vel_key = V(i);
|
||||
auto current_bias_key = B(i);
|
||||
double t = gps_measurements[i].time;
|
||||
|
||||
if (i == first_gps_pose) {
|
||||
// Create initial estimate and prior on initial pose, velocity, and biases
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, current_pose_global, sigma_init_x);
|
||||
new_factors.emplace_shared<PriorFactor<Vector3>>(current_vel_key, current_velocity_global, sigma_init_v);
|
||||
new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(current_bias_key, current_bias, sigma_init_b);
|
||||
} else {
|
||||
double t_previous = gps_measurements[i-1].time;
|
||||
|
||||
// Summarize IMU data between the previous GPS measurement and now
|
||||
current_summarized_measurement = std::make_shared<PreintegratedImuMeasurements>(imu_params, current_bias);
|
||||
static size_t included_imu_measurement_count = 0;
|
||||
while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
|
||||
if (imu_measurements[j].time >= t_previous) {
|
||||
current_summarized_measurement->integrateMeasurement(imu_measurements[j].accelerometer,
|
||||
imu_measurements[j].gyroscope,
|
||||
imu_measurements[j].dt);
|
||||
included_imu_measurement_count++;
|
||||
}
|
||||
j++;
|
||||
}
|
||||
|
||||
// Create IMU factor
|
||||
auto previous_pose_key = X(i-1);
|
||||
auto previous_vel_key = V(i-1);
|
||||
auto previous_bias_key = B(i-1);
|
||||
|
||||
new_factors.emplace_shared<ImuFactor>(previous_pose_key, previous_vel_key,
|
||||
current_pose_key, current_vel_key,
|
||||
previous_bias_key, *current_summarized_measurement);
|
||||
|
||||
// Bias evolution as given in the IMU metadata
|
||||
auto sigma_between_b = noiseModel::Diagonal::Sigmas((Vector(6) <<
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.accelerometer_bias_sigma),
|
||||
Vector3::Constant(sqrt(included_imu_measurement_count) * kitti_calibration.gyroscope_bias_sigma))
|
||||
.finished());
|
||||
new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(previous_bias_key,
|
||||
current_bias_key,
|
||||
imuBias::ConstantBias(),
|
||||
sigma_between_b);
|
||||
|
||||
// Create GPS factor
|
||||
auto gps_pose = Pose3(current_pose_global.rotation(), gps_measurements[i].position);
|
||||
if ((i % gps_skip) == 0) {
|
||||
new_factors.emplace_shared<PriorFactor<Pose3>>(current_pose_key, gps_pose, noise_model_gps);
|
||||
new_values.insert(current_pose_key, gps_pose);
|
||||
|
||||
printf("################ POSE INCLUDED AT TIME %lf ################\n", t);
|
||||
gps_pose.translation().print();
|
||||
printf("\n\n");
|
||||
} else {
|
||||
new_values.insert(current_pose_key, current_pose_global);
|
||||
}
|
||||
|
||||
// Add initial values for velocity and bias based on the previous estimates
|
||||
new_values.insert(current_vel_key, current_velocity_global);
|
||||
new_values.insert(current_bias_key, current_bias);
|
||||
|
||||
// Update solver
|
||||
// =======================================================================
|
||||
// We accumulate 2*GPSskip GPS measurements before updating the solver at
|
||||
// first so that the heading becomes observable.
|
||||
if (i > (first_gps_pose + 2*gps_skip)) {
|
||||
printf("################ NEW FACTORS AT TIME %lf ################\n", t);
|
||||
new_factors.print();
|
||||
|
||||
isam.update(new_factors, new_values);
|
||||
|
||||
// Reset the newFactors and newValues list
|
||||
new_factors.resize(0);
|
||||
new_values.clear();
|
||||
|
||||
// Extract the result/current estimates
|
||||
Values result = isam.calculateEstimate();
|
||||
|
||||
current_pose_global = result.at<Pose3>(current_pose_key);
|
||||
current_velocity_global = result.at<Vector3>(current_vel_key);
|
||||
current_bias = result.at<imuBias::ConstantBias>(current_bias_key);
|
||||
|
||||
printf("\n################ POSE AT TIME %lf ################\n", t);
|
||||
current_pose_global.print();
|
||||
printf("\n\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Save results to file
|
||||
printf("\nWriting results to file...\n");
|
||||
FILE* fp_out = fopen(output_filename.c_str(), "w+");
|
||||
fprintf(fp_out, "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");
|
||||
|
||||
Values result = isam.calculateEstimate();
|
||||
for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
|
||||
auto pose_key = X(i);
|
||||
auto vel_key = V(i);
|
||||
auto bias_key = B(i);
|
||||
|
||||
auto pose = result.at<Pose3>(pose_key);
|
||||
auto velocity = result.at<Vector3>(vel_key);
|
||||
auto bias = result.at<imuBias::ConstantBias>(bias_key);
|
||||
|
||||
auto pose_quat = pose.rotation().toQuaternion();
|
||||
auto gps = gps_measurements[i].position;
|
||||
|
||||
fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
|
||||
gps_measurements[i].time,
|
||||
pose.x(), pose.y(), pose.z(),
|
||||
pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(),
|
||||
gps(0), gps(1), gps(2));
|
||||
}
|
||||
|
||||
fclose(fp_out);
|
||||
}
|
|
@@ -10,7 +10,7 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file small.cpp
|
||||
* @file UGM_chain.cpp
|
||||
* @brief UGM (undirected graphical model) examples: chain
|
||||
* @author Frank Dellaert
|
||||
* @author Abhijit Kundu
|
||||
|
@@ -19,10 +19,9 @@
|
|||
* for more explanation. This code demos the same example using GTSAM.
|
||||
*/
|
||||
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
#include <gtsam/discrete/DiscreteSequentialSolver.h>
|
||||
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||
#include <gtsam/base/timing.h>
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||
|
||||
#include <iomanip>
|
||||
|
||||
|
@@ -30,9 +29,8 @@ using namespace std;
|
|||
using namespace gtsam;
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
|
||||
// Set Number of Nodes in the Graph
|
||||
const int nrNodes = 60;
|
||||
// Set Number of Nodes in the Graph
|
||||
const int nrNodes = 60;
|
||||
|
||||
// Each node takes 1 of 7 possible states denoted by 0-6 in following order:
|
||||
// ["VideoGames" "Industry" "GradSchool" "VideoGames(with PhD)"
|
||||
|
@@ -40,70 +38,55 @@ int main(int argc, char** argv) {
|
|||
const size_t nrStates = 7;
|
||||
|
||||
// define variables
|
||||
vector<DiscreteKey> nodes;
|
||||
for (int i = 0; i < nrNodes; i++){
|
||||
DiscreteKey dk(i, nrStates);
|
||||
nodes.push_back(dk);
|
||||
}
|
||||
vector<DiscreteKey> nodes;
|
||||
for (int i = 0; i < nrNodes; i++) {
|
||||
DiscreteKey dk(i, nrStates);
|
||||
nodes.push_back(dk);
|
||||
}
|
||||
|
||||
// create graph
|
||||
DiscreteFactorGraph graph;
|
||||
|
||||
// add node potentials
|
||||
graph.add(nodes[0], ".3 .6 .1 0 0 0 0");
|
||||
for (int i = 1; i < nrNodes; i++)
|
||||
graph.add(nodes[i], "1 1 1 1 1 1 1");
|
||||
for (int i = 1; i < nrNodes; i++) graph.add(nodes[i], "1 1 1 1 1 1 1");
|
||||
|
||||
const std::string edgePotential = ".08 .9 .01 0 0 0 .01 "
|
||||
".03 .95 .01 0 0 0 .01 "
|
||||
".06 .06 .75 .05 .05 .02 .01 "
|
||||
"0 0 0 .3 .6 .09 .01 "
|
||||
"0 0 0 .02 .95 .02 .01 "
|
||||
"0 0 0 .01 .01 .97 .01 "
|
||||
"0 0 0 0 0 0 1";
|
||||
const std::string edgePotential =
|
||||
".08 .9 .01 0 0 0 .01 "
|
||||
".03 .95 .01 0 0 0 .01 "
|
||||
".06 .06 .75 .05 .05 .02 .01 "
|
||||
"0 0 0 .3 .6 .09 .01 "
|
||||
"0 0 0 .02 .95 .02 .01 "
|
||||
"0 0 0 .01 .01 .97 .01 "
|
||||
"0 0 0 0 0 0 1";
|
||||
|
||||
// add edge potentials
|
||||
for (int i = 0; i < nrNodes - 1; i++)
|
||||
graph.add(nodes[i] & nodes[i + 1], edgePotential);
|
||||
|
||||
cout << "Created Factor Graph with " << nrNodes << " variable nodes and "
|
||||
<< graph.size() << " factors (Unary+Edge).";
|
||||
<< graph.size() << " factors (Unary+Edge).";
|
||||
|
||||
// "Decoding", i.e., configuration with largest value
|
||||
// We use sequential variable elimination
|
||||
DiscreteSequentialSolver solver(graph);
|
||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
|
||||
optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
|
||||
|
||||
// "Inference" Computing marginals for each node
|
||||
|
||||
|
||||
cout << "\nComputing Node Marginals ..(Sequential Elimination)" << endl;
|
||||
gttic_(Sequential);
|
||||
for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
|
||||
++itr) {
|
||||
//Compute the marginal
|
||||
Vector margProbs = solver.marginalProbabilities(*itr);
|
||||
|
||||
//Print the marginals
|
||||
cout << "Node#" << setw(4) << itr->first << " : ";
|
||||
print(margProbs);
|
||||
}
|
||||
gttoc_(Sequential);
|
||||
|
||||
// Here we'll make use of DiscreteMarginals class, which makes use of
|
||||
// bayes-tree based shortcut evaluation of marginals
|
||||
DiscreteMarginals marginals(graph);
|
||||
|
||||
cout << "\nComputing Node Marginals ..(BayesTree based)" << endl;
|
||||
gttic_(Multifrontal);
|
||||
for (vector<DiscreteKey>::iterator itr = nodes.begin(); itr != nodes.end();
|
||||
++itr) {
|
||||
//Compute the marginal
|
||||
Vector margProbs = marginals.marginalProbabilities(*itr);
|
||||
for (vector<DiscreteKey>::iterator it = nodes.begin(); it != nodes.end();
|
||||
++it) {
|
||||
// Compute the marginal
|
||||
Vector margProbs = marginals.marginalProbabilities(*it);
|
||||
|
||||
//Print the marginals
|
||||
cout << "Node#" << setw(4) << itr->first << " : ";
|
||||
// Print the marginals
|
||||
cout << "Node#" << setw(4) << it->first << " : ";
|
||||
print(margProbs);
|
||||
}
|
||||
gttoc_(Multifrontal);
|
||||
|
@@ -111,4 +94,3 @@ int main(int argc, char** argv) {
|
|||
tictoc_print_();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -10,15 +10,16 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file small.cpp
|
||||
* @file UGM_small.cpp
|
||||
* @brief UGM (undirected graphical model) examples: small
|
||||
* @author Frank Dellaert
|
||||
*
|
||||
* See http://www.di.ens.fr/~mschmidt/Software/UGM/small.html
|
||||
*/
|
||||
|
||||
#include <gtsam/base/Vector.h>
|
||||
#include <gtsam/discrete/DiscreteFactorGraph.h>
|
||||
#include <gtsam/discrete/DiscreteSequentialSolver.h>
|
||||
#include <gtsam/discrete/DiscreteMarginals.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
@@ -61,24 +62,24 @@ int main(int argc, char** argv) {
|
|||
|
||||
// "Decoding", i.e., configuration with largest value (MPE)
|
||||
// We use sequential variable elimination
|
||||
DiscreteSequentialSolver solver(graph);
|
||||
DiscreteFactor::sharedValues optimalDecoding = solver.optimize();
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
|
||||
optimalDecoding->print("\noptimalDecoding");
|
||||
|
||||
// "Inference" Computing marginals
|
||||
cout << "\nComputing Node Marginals .." << endl;
|
||||
Vector margProbs;
|
||||
DiscreteMarginals marginals(graph);
|
||||
|
||||
margProbs = solver.marginalProbabilities(Cathy);
|
||||
Vector margProbs = marginals.marginalProbabilities(Cathy);
|
||||
print(margProbs, "Cathy's Node Marginal:");
|
||||
|
||||
margProbs = solver.marginalProbabilities(Heather);
|
||||
margProbs = marginals.marginalProbabilities(Heather);
|
||||
print(margProbs, "Heather's Node Marginal");
|
||||
|
||||
margProbs = solver.marginalProbabilities(Mark);
|
||||
margProbs = marginals.marginalProbabilities(Mark);
|
||||
print(margProbs, "Mark's Node Marginal");
|
||||
|
||||
margProbs = solver.marginalProbabilities(Allison);
|
||||
margProbs = marginals.marginalProbabilities(Allison);
|
||||
print(margProbs, "Allison's Node Marginal");
|
||||
|
||||
return 0;
|
||||
|
|
gtsam.h

@@ -281,7 +281,7 @@ virtual class Value {
};

#include <gtsam/base/GenericValue.h>
-template<T = {Vector, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::StereoPoint2, gtsam::Cal3_S2, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
+template<T = {Vector, Matrix, gtsam::Point2, gtsam::Point3, gtsam::Rot2, gtsam::Rot3, gtsam::Pose2, gtsam::Pose3, gtsam::StereoPoint2, gtsam::Cal3_S2, gtsam::Cal3DS2, gtsam::Cal3Bundler, gtsam::EssentialMatrix, gtsam::CalibratedCamera, gtsam::SimpleCamera, gtsam::imuBias::ConstantBias}>
virtual class GenericValue : gtsam::Value {
  void serializable() const;
};

@@ -598,6 +598,7 @@ class SOn {
  // Standard Constructors
  SOn(size_t n);
  static gtsam::SOn FromMatrix(Matrix R);
  static gtsam::SOn Lift(size_t n, Matrix R);

  // Testable
  void print(string s) const;

@@ -2851,6 +2852,34 @@ virtual class KarcherMeanFactor : gtsam::NonlinearFactor {
  KarcherMeanFactor(const gtsam::KeyVector& keys);
};

#include <gtsam/slam/FrobeniusFactor.h>
gtsam::noiseModel::Isotropic* ConvertPose3NoiseModel(
    gtsam::noiseModel::Base* model, size_t d);

template<T = {gtsam::SO3, gtsam::SO4}>
virtual class FrobeniusFactor : gtsam::NoiseModelFactor {
  FrobeniusFactor(size_t key1, size_t key2);
  FrobeniusFactor(size_t key1, size_t key2, gtsam::noiseModel::Base* model);

  Vector evaluateError(const T& R1, const T& R2);
};

template<T = {gtsam::SO3, gtsam::SO4}>
virtual class FrobeniusBetweenFactor : gtsam::NoiseModelFactor {
  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12);
  FrobeniusBetweenFactor(size_t key1, size_t key2, const T& R12, gtsam::noiseModel::Base* model);

  Vector evaluateError(const T& R1, const T& R2);
};

virtual class FrobeniusWormholeFactor : gtsam::NoiseModelFactor {
  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
                          size_t p);
  FrobeniusWormholeFactor(size_t key1, size_t key2, const gtsam::Rot3& R12,
                          size_t p, gtsam::noiseModel::Base* model);
  Vector evaluateError(const gtsam::SOn& Q1, const gtsam::SOn& Q2);
};

//*************************************************************************
// Navigation
//*************************************************************************

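For orientation, the sketch below shows roughly how the FrobeniusBetweenFactor declared above could be exercised from C++. It is illustrative only and not part of this changeset; the include paths and the 6-vector tangent parametrization of SO(4) are assumptions based on these declarations and on the Python unit test earlier in this diff.

// Illustrative sketch only (not part of this diff); include paths are assumptions.
#include <gtsam/geometry/SO4.h>
#include <gtsam/slam/FrobeniusFactor.h>

#include <iostream>

int main() {
  using namespace gtsam;
  // Two nearby SO(4) elements, mirroring the Python unit test earlier in this diff.
  Vector6 v1, v2;
  v1 << 0, 0, 0, 0.1, 0, 0;
  v2 << 0, 0, 0, 0.01, 0.02, 0.03;
  const SO4 Q1 = SO4::Expmap(v1);
  const SO4 Q2 = SO4::Expmap(v2);

  // The measurement equals the true relative rotation, so the 16-dim error is ~zero.
  FrobeniusBetweenFactor<SO4> factor(1, 2, Q1.between(Q2));
  const Vector error = factor.evaluateError(Q1, Q2);
  std::cout << "error norm: " << error.norm() << std::endl;
  return 0;
}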
@@ -2971,6 +3000,7 @@ class PreintegratedImuMeasurements {
  gtsam::Rot3 deltaRij() const;
  Vector deltaPij() const;
  Vector deltaVij() const;
  gtsam::imuBias::ConstantBias biasHat() const;
  Vector biasHatVector() const;
  gtsam::NavState predict(const gtsam::NavState& state_i,
      const gtsam::imuBias::ConstantBias& bias) const;

@@ -3032,6 +3062,7 @@ class PreintegratedCombinedMeasurements {
  gtsam::Rot3 deltaRij() const;
  Vector deltaPij() const;
  Vector deltaVij() const;
  gtsam::imuBias::ConstantBias biasHat() const;
  Vector biasHatVector() const;
  gtsam::NavState predict(const gtsam::NavState& state_i,
      const gtsam::imuBias::ConstantBias& bias) const;

@@ -17,7 +17,7 @@ if(NOT GTSAM_USE_SYSTEM_EIGEN)
  endforeach(eigen_dir)

  if(GTSAM_WITH_EIGEN_UNSUPPORTED)
-    message("-- Installing Eigen Unsupported modules")
+    message(STATUS "Installing Eigen Unsupported modules")
    # do the same for the unsupported eigen folder
    file(GLOB_RECURSE unsupported_eigen_headers "${CMAKE_CURRENT_SOURCE_DIR}/Eigen/unsupported/Eigen/*.h")

@@ -181,7 +181,7 @@ public:
  // Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
  enum { NeedsToAlign = (sizeof(T) % 16) == 0 };
public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+  GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
};

/// use this macro instead of BOOST_CLASS_EXPORT for GenericValues

@@ -214,7 +214,7 @@ public:
  enum { NeedsToAlign = (sizeof(M1) % 16) == 0 || (sizeof(M2) % 16) == 0
  };
public:
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
+  GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
};

// Define any direct product group to be a model of the multiplicative Group concept

@@ -0,0 +1,67 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file make_shared.h
 * @brief make_shared trampoline function to ensure proper alignment
 * @author Fan Jiang
 */

#pragma once

#include <gtsam/base/types.h>

#include <Eigen/Core>

#include <boost/make_shared.hpp>

#include <type_traits>

namespace gtsam {
  /// An shorthand alias for accessing the ::type inside std::enable_if that can be used in a template directly
  template<bool B, class T = void>
  using enable_if_t = typename std::enable_if<B, T>::type;
}

namespace gtsam {

  /**
   * Add our own `make_shared` as a layer of wrapping on `boost::make_shared`
   * This solves the problem with the stock `make_shared` that custom alignment is not respected, causing SEGFAULTs
   * at runtime, which is notoriously hard to debug.
   *
   * Explanation
   * ===============
   * The template `needs_eigen_aligned_allocator<T>::value` will evaluate to `std::true_type` if the type alias
   * `_eigen_aligned_allocator_trait = void` is present in a class, which is automatically added by the
   * `GTSAM_MAKE_ALIGNED_OPERATOR_NEW` macro.
   *
   * This function declaration will only be taken when the above condition is true, so if some object does not need to
   * be aligned, `gtsam::make_shared` will fall back to the next definition, which is a simple wrapper for
   * `boost::make_shared`.
   *
   * @tparam T The type of object being constructed
   * @tparam Args Type of the arguments of the constructor
   * @param args Arguments of the constructor
   * @return The object created as a boost::shared_ptr<T>
   */
  template<typename T, typename ... Args>
  gtsam::enable_if_t<needs_eigen_aligned_allocator<T>::value, boost::shared_ptr<T>> make_shared(Args &&... args) {
    return boost::allocate_shared<T>(Eigen::aligned_allocator<T>(), std::forward<Args>(args)...);
  }

  /// Fall back to the boost version if no need for alignment
  template<typename T, typename ... Args>
  gtsam::enable_if_t<!needs_eigen_aligned_allocator<T>::value, boost::shared_ptr<T>> make_shared(Args &&... args) {
    return boost::make_shared<T>(std::forward<Args>(args)...);
  }

}

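As a quick illustration of the dispatch described in the comment above, a minimal usage could look like the following. This is a sketch only, not part of the diff; the demo structs and the gtsam/base/make_shared.h include path are assumptions.

// Illustrative sketch only (not part of this diff); the demo structs and the
// gtsam/base/make_shared.h include path are assumptions.
#include <gtsam/base/make_shared.h>

#include <Eigen/Core>

struct AlignedDemo {
  Eigen::Matrix4d M;  // fixed-size Eigen member, so the object needs over-aligned storage
  GTSAM_MAKE_ALIGNED_OPERATOR_NEW  // also adds the trait seen by needs_eigen_aligned_allocator
};

struct PlainDemo {
  double x;
};

int main() {
  // Picks the aligned overload: boost::allocate_shared with Eigen::aligned_allocator<AlignedDemo>.
  boost::shared_ptr<AlignedDemo> a = gtsam::make_shared<AlignedDemo>();
  // No alignment trait, so this falls back to plain boost::make_shared.
  boost::shared_ptr<PlainDemo> p = gtsam::make_shared<PlainDemo>();
  return 0;
}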
@@ -42,123 +42,218 @@
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
// Serialization directly to strings in compressed format
|
||||
template<class T>
|
||||
std::string serialize(const T& input) {
|
||||
std::ostringstream out_archive_stream;
|
||||
/** @name Standard serialization
|
||||
* Serialization in default compressed format
|
||||
*/
|
||||
///@{
|
||||
/// serializes to a stream
|
||||
template <class T>
|
||||
void serializeToStream(const T& input, std::ostream& out_archive_stream) {
|
||||
boost::archive::text_oarchive out_archive(out_archive_stream);
|
||||
out_archive << input;
|
||||
return out_archive_stream.str();
|
||||
}
|
||||
|
||||
template<class T>
|
||||
void deserialize(const std::string& serialized, T& output) {
|
||||
std::istringstream in_archive_stream(serialized);
|
||||
/// deserializes from a stream
|
||||
template <class T>
|
||||
void deserializeFromStream(std::istream& in_archive_stream, T& output) {
|
||||
boost::archive::text_iarchive in_archive(in_archive_stream);
|
||||
in_archive >> output;
|
||||
}
|
||||
|
||||
template<class T>
|
||||
/// serializes to a string
|
||||
template <class T>
|
||||
std::string serializeToString(const T& input) {
|
||||
std::ostringstream out_archive_stream;
|
||||
serializeToStream(input, out_archive_stream);
|
||||
return out_archive_stream.str();
|
||||
}
|
||||
|
||||
/// deserializes from a string
|
||||
template <class T>
|
||||
void deserializeFromString(const std::string& serialized, T& output) {
|
||||
std::istringstream in_archive_stream(serialized);
|
||||
deserializeFromStream(in_archive_stream, output);
|
||||
}
|
||||
|
||||
/// serializes to a file
|
||||
template <class T>
|
||||
bool serializeToFile(const T& input, const std::string& filename) {
|
||||
std::ofstream out_archive_stream(filename.c_str());
|
||||
if (!out_archive_stream.is_open())
|
||||
return false;
|
||||
boost::archive::text_oarchive out_archive(out_archive_stream);
|
||||
out_archive << input;
|
||||
if (!out_archive_stream.is_open()) return false;
|
||||
serializeToStream(input, out_archive_stream);
|
||||
out_archive_stream.close();
|
||||
return true;
|
||||
}
|
||||
|
||||
template<class T>
|
||||
/// deserializes from a file
|
||||
template <class T>
|
||||
bool deserializeFromFile(const std::string& filename, T& output) {
|
||||
std::ifstream in_archive_stream(filename.c_str());
|
||||
if (!in_archive_stream.is_open())
|
||||
return false;
|
||||
boost::archive::text_iarchive in_archive(in_archive_stream);
|
||||
in_archive >> output;
|
||||
if (!in_archive_stream.is_open()) return false;
|
||||
deserializeFromStream(in_archive_stream, output);
|
||||
in_archive_stream.close();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Serialization to XML format with named structures
|
||||
template<class T>
|
||||
std::string serializeXML(const T& input, const std::string& name="data") {
|
||||
std::ostringstream out_archive_stream;
|
||||
// braces to flush out_archive as it goes out of scope before taking str()
|
||||
// fixes crash with boost 1.66-1.68
|
||||
// see https://github.com/boostorg/serialization/issues/82
|
||||
{
|
||||
boost::archive::xml_oarchive out_archive(out_archive_stream);
|
||||
out_archive << boost::serialization::make_nvp(name.c_str(), input);
|
||||
}
|
||||
return out_archive_stream.str();
|
||||
/// serializes to a string
|
||||
template <class T>
|
||||
std::string serialize(const T& input) {
  return serializeToString(input);
}

/// deserializes from a string
template <class T>
void deserialize(const std::string& serialized, T& output) {
  deserializeFromString(serialized, output);
}
///@}

/** @name XML Serialization
 * Serialization to XML format with named structures
 */
///@{
/// serializes to a stream in XML
template <class T>
void serializeToXMLStream(const T& input, std::ostream& out_archive_stream,
                          const std::string& name = "data") {
  boost::archive::xml_oarchive out_archive(out_archive_stream);
  out_archive << boost::serialization::make_nvp(name.c_str(), input);
}

/// deserializes from a stream in XML
template <class T>
void deserializeFromXMLStream(std::istream& in_archive_stream, T& output,
                              const std::string& name = "data") {
  boost::archive::xml_iarchive in_archive(in_archive_stream);
  in_archive >> boost::serialization::make_nvp(name.c_str(), output);
}

/// serializes to a string in XML
template <class T>
std::string serializeToXMLString(const T& input,
                                 const std::string& name = "data") {
  std::ostringstream out_archive_stream;
  serializeToXMLStream(input, out_archive_stream, name);
  return out_archive_stream.str();
}

/// deserializes from a string in XML
template <class T>
void deserializeFromXMLString(const std::string& serialized, T& output,
                              const std::string& name = "data") {
  std::istringstream in_archive_stream(serialized);
  deserializeFromXMLStream(in_archive_stream, output, name);
}

/// serializes to an XML file
template <class T>
bool serializeToXMLFile(const T& input, const std::string& filename,
                        const std::string& name = "data") {
  std::ofstream out_archive_stream(filename.c_str());
  if (!out_archive_stream.is_open()) return false;
  serializeToXMLStream(input, out_archive_stream, name);
  out_archive_stream.close();
  return true;
}

/// deserializes from an XML file
template <class T>
bool deserializeFromXMLFile(const std::string& filename, T& output,
                            const std::string& name = "data") {
  std::ifstream in_archive_stream(filename.c_str());
  if (!in_archive_stream.is_open()) return false;
  deserializeFromXMLStream(in_archive_stream, output, name);
  in_archive_stream.close();
  return true;
}
|
||||
/// serializes to a string in XML
|
||||
template <class T>
|
||||
std::string serializeXML(const T& input,
|
||||
const std::string& name = "data") {
|
||||
return serializeToXMLString(input, name);
|
||||
}
|
||||
|
||||
/// deserializes from a string in XML
|
||||
template <class T>
|
||||
void deserializeXML(const std::string& serialized, T& output,
|
||||
const std::string& name = "data") {
|
||||
deserializeFromXMLString(serialized, output, name);
|
||||
}
|
||||
///@}
|
||||
|
||||
/** @name Binary Serialization
|
||||
* Serialization to binary format with named structures
|
||||
*/
|
||||
///@{
|
||||
/// serializes to a stream in binary
|
||||
template <class T>
|
||||
void serializeToBinaryStream(const T& input, std::ostream& out_archive_stream,
|
||||
const std::string& name = "data") {
|
||||
boost::archive::binary_oarchive out_archive(out_archive_stream);
|
||||
out_archive << boost::serialization::make_nvp(name.c_str(), input);
|
||||
}
|
||||
|
||||
/// deserializes from a stream in binary
|
||||
template <class T>
|
||||
void deserializeFromBinaryStream(std::istream& in_archive_stream, T& output,
|
||||
const std::string& name = "data") {
|
||||
boost::archive::binary_iarchive in_archive(in_archive_stream);
|
||||
in_archive >> boost::serialization::make_nvp(name.c_str(), output);
|
||||
}
|
||||
|
||||
/// serializes to a string in binary
|
||||
template <class T>
|
||||
std::string serializeToBinaryString(const T& input,
|
||||
const std::string& name = "data") {
|
||||
std::ostringstream out_archive_stream;
|
||||
serializeToBinaryStream(input, out_archive_stream, name);
|
||||
return out_archive_stream.str();
|
||||
}
|
||||
|
||||
/// deserializes from a string in binary
|
||||
template <class T>
|
||||
void deserializeFromBinaryString(const std::string& serialized, T& output,
|
||||
const std::string& name = "data") {
|
||||
std::istringstream in_archive_stream(serialized);
|
||||
deserializeFromBinaryStream(in_archive_stream, output, name);
|
||||
}
|
||||
|
||||
/// serializes to a binary file
|
||||
template <class T>
|
||||
bool serializeToBinaryFile(const T& input, const std::string& filename,
|
||||
const std::string& name = "data") {
|
||||
std::ofstream out_archive_stream(filename.c_str());
|
||||
if (!out_archive_stream.is_open()) return false;
|
||||
serializeToBinaryStream(input, out_archive_stream, name);
|
||||
out_archive_stream.close();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// deserializes from a binary file
|
||||
template <class T>
|
||||
bool deserializeFromBinaryFile(const std::string& filename, T& output,
|
||||
const std::string& name = "data") {
|
||||
std::ifstream in_archive_stream(filename.c_str());
|
||||
if (!in_archive_stream.is_open()) return false;
|
||||
deserializeFromBinaryStream(in_archive_stream, output, name);
|
||||
in_archive_stream.close();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// serializes to a string in binary
|
||||
template <class T>
|
||||
std::string serializeBinary(const T& input,
|
||||
const std::string& name = "data") {
|
||||
return serializeToBinaryString(input, name);
|
||||
}
|
||||
|
||||
/// deserializes from a string in binary
|
||||
template <class T>
|
||||
void deserializeBinary(const std::string& serialized, T& output,
|
||||
const std::string& name = "data") {
|
||||
deserializeFromBinaryString(serialized, output, name);
|
||||
}
|
||||
///@}
|
||||
|
||||
} // namespace gtsam
|
||||
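To see how the new helpers compose, here is a standalone round-trip sketch, not part of the diff; it assumes the usual GTSAM/Boost serialization support for Pose3 and linking against the gtsam library.

#include <gtsam/base/serialization.h>
#include <gtsam/geometry/Pose3.h>

#include <cassert>
#include <string>

int main() {
  using namespace gtsam;
  const Pose3 pose(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));

  // Plain-text string round-trip.
  const std::string text = serialize(pose);
  Pose3 fromText;
  deserialize(text, fromText);

  // XML round-trip with a named structure.
  const std::string xml = serializeToXMLString(pose, "pose");
  Pose3 fromXml;
  deserializeFromXMLString(xml, fromXml, "pose");

  assert(pose.equals(fromText) && pose.equals(fromXml));
  return 0;
}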
|
|
|
@ -26,6 +26,7 @@
|
|||
#include <gtsam/base/serialization.h>
|
||||
|
||||
#include <boost/serialization/serialization.hpp>
|
||||
#include <boost/filesystem.hpp>
|
||||
|
||||
|
||||
// whether to print the serialized text to stdout
|
||||
|
@ -40,22 +41,37 @@ T create() {
|
|||
return T();
|
||||
}
|
||||
|
||||
// Creates or empties a folder in the build folder and returns the relative path
|
||||
boost::filesystem::path resetFilesystem(
|
||||
boost::filesystem::path folder = "actual") {
|
||||
boost::filesystem::remove_all(folder);
|
||||
boost::filesystem::create_directory(folder);
|
||||
return folder;
|
||||
}
|
||||
|
||||
// Templated round-trip serialization
|
||||
template<class T>
|
||||
void roundtrip(const T& input, T& output) {
|
||||
// Serialize
|
||||
std::string serialized = serialize(input);
|
||||
if (verbose) std::cout << serialized << std::endl << std::endl;
|
||||
|
||||
deserialize(serialized, output);
|
||||
}
|
||||
|
||||
// This version requires equality operator
|
||||
// Templated round-trip serialization using a file
|
||||
template<class T>
|
||||
void roundtripFile(const T& input, T& output) {
|
||||
boost::filesystem::path path = resetFilesystem()/"graph.dat";
|
||||
serializeToFile(input, path.string());
|
||||
deserializeFromFile(path.string(), output);
|
||||
}
|
||||
|
||||
// This version requires equality operator and uses string and file round-trips
|
||||
template<class T>
|
||||
bool equality(const T& input = T()) {
|
||||
T output = create<T>(), outputf = create<T>();
roundtrip<T>(input,output);
roundtripFile<T>(input,outputf);
return (input==output) && (input==outputf);
|
||||
}
|
||||
|
||||
// This version requires Testable
|
||||
|
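As a minimal sketch of how these helpers are exercised: the type below is hypothetical and only illustrates the requirements (default constructor, operator==, a Boost serialize member); it assumes the usual Boost NVP support pulled in by this test's includes and is not part of the diff.

// Hypothetical type for illustration only.
struct Sample {
  double x = 0;
  bool operator==(const Sample& other) const { return x == other.x; }
  template <class Archive>
  void serialize(Archive& ar, const unsigned int /*version*/) {
    ar& boost::serialization::make_nvp("x", x);
  }
};

// Inside a TEST(...) body:
//   Sample in;
//   in.x = 42.0;
//   EXPECT(equality<Sample>(in));     // string and file round-trips must both match
//   EXPECT(equalityXML<Sample>(in));  // same check through the XML archives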
@ -77,20 +93,26 @@ bool equalsDereferenced(const T& input) {
|
|||
// Templated round-trip serialization using XML
|
||||
template<class T>
|
||||
void roundtripXML(const T& input, T& output) {
|
||||
// Serialize
|
||||
std::string serialized = serializeXML<T>(input);
|
||||
if (verbose) std::cout << serialized << std::endl << std::endl;
|
||||
|
||||
// De-serialize
|
||||
deserializeXML(serialized, output);
|
||||
}
|
||||
|
||||
// Templated round-trip serialization using XML File
|
||||
template<class T>
|
||||
void roundtripXMLFile(const T& input, T& output) {
|
||||
boost::filesystem::path path = resetFilesystem()/"graph.xml";
|
||||
serializeToXMLFile(input, path.string());
|
||||
deserializeFromXMLFile(path.string(), output);
|
||||
}
|
||||
|
||||
// This version requires equality operator
|
||||
template<class T>
|
||||
bool equalityXML(const T& input = T()) {
|
||||
T output = create<T>(), outputf = create<T>();
roundtripXML<T>(input,output);
roundtripXMLFile<T>(input,outputf);
return (input==output) && (input==outputf);
|
||||
}
|
||||
|
||||
// This version requires Testable
|
||||
|
@ -112,20 +134,26 @@ bool equalsDereferencedXML(const T& input = T()) {
|
|||
// Templated round-trip serialization using XML
|
||||
template<class T>
|
||||
void roundtripBinary(const T& input, T& output) {
|
||||
// Serialize
|
||||
std::string serialized = serializeBinary<T>(input);
|
||||
if (verbose) std::cout << serialized << std::endl << std::endl;
|
||||
|
||||
// De-serialize
|
||||
deserializeBinary(serialized, output);
|
||||
}
|
||||
|
||||
// Templated round-trip serialization using Binary file
|
||||
template<class T>
|
||||
void roundtripBinaryFile(const T& input, T& output) {
|
||||
boost::filesystem::path path = resetFilesystem()/"graph.bin";
|
||||
serializeToBinaryFile(input, path.string());
|
||||
deserializeFromBinaryFile(path.string(), output);
|
||||
}
|
||||
|
||||
// This version requires equality operator
|
||||
template<class T>
|
||||
bool equalityBinary(const T& input = T()) {
|
||||
T output = create<T>(), outputf = create<T>();
roundtripBinary<T>(input,output);
roundtripBinaryFile<T>(input,outputf);
return (input==output) && (input==outputf);
|
||||
}
|
||||
|
||||
// This version requires Testable
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <cstdint>
|
||||
|
||||
#include <exception>
|
||||
#include <string>
|
||||
|
||||
#ifdef GTSAM_USE_TBB
|
||||
#include <tbb/scalable_allocator.h>
|
||||
|
@ -54,7 +55,7 @@
|
|||
namespace gtsam {
|
||||
|
||||
/// Function to demangle type name of variable, e.g. demangle(typeid(x).name())
|
||||
std::string demangle(const char* name);
|
||||
std::string GTSAM_EXPORT demangle(const char* name);
|
||||
|
||||
/// Integer nonlinear key type
|
||||
typedef std::uint64_t Key;
|
||||
|
@ -230,3 +231,50 @@ namespace std {
|
|||
#ifdef ERROR
|
||||
#undef ERROR
|
||||
#endif
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/// Convenience void_t as we assume C++11; it will not conflict with the std one in C++17 as this is in `gtsam::`
|
||||
template<typename ...> using void_t = void;
|
||||
|
||||
/**
|
||||
* A SFINAE trait to mark classes that need special alignment.
|
||||
*
|
||||
* This is required to make boost::make_shared etc. respect alignment, which is essential for the Python
|
||||
* wrappers to work properly.
|
||||
*
|
||||
* Explanation
|
||||
* =============
|
||||
* When a GTSAM type is not declared with the type alias `_eigen_aligned_allocator_trait = void`, the first template
|
||||
* will be taken so `needs_eigen_aligned_allocator` will be resolved to `std::false_type`.
|
||||
*
|
||||
* Otherwise, it will resolve to the second template, which will be resolved to `std::true_type`.
|
||||
*
|
||||
* Please refer to `gtsam/base/make_shared.h` for an example.
|
||||
*/
|
||||
template<typename, typename = void_t<>>
|
||||
struct needs_eigen_aligned_allocator : std::false_type {
|
||||
};
|
||||
template<typename T>
|
||||
struct needs_eigen_aligned_allocator<T, void_t<typename T::_eigen_aligned_allocator_trait>> : std::true_type {
|
||||
};
|
||||
|
||||
}
|
||||
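A minimal sketch of how the trait resolves; the two structs are illustrative only and assume gtsam/base/types.h is included.

// Hypothetical types, for illustration only.
struct Plain {};                                // no trait alias
struct Aligned {
  using _eigen_aligned_allocator_trait = void;  // e.g. added by GTSAM_MAKE_ALIGNED_OPERATOR_NEW below
};

static_assert(!gtsam::needs_eigen_aligned_allocator<Plain>::value,
              "falls back to the primary template -> std::false_type");
static_assert(gtsam::needs_eigen_aligned_allocator<Aligned>::value,
              "partial specialization matches -> std::true_type");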
|
||||
/**
|
||||
* This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned
|
||||
* memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that are hard to debug.
|
||||
* See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation.
|
||||
*/
|
||||
#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW \
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW \
|
||||
using _eigen_aligned_allocator_trait = void;
|
||||
|
||||
/**
|
||||
* This marks a GTSAM object to require alignment. With this macro an object will automatically be allocated in aligned
|
||||
* memory when one uses `gtsam::make_shared`. It reduces future misalignment problems that are hard to debug.
|
||||
* See https://eigen.tuxfamily.org/dox/group__DenseMatrixManipulation__Alignement.html for detailed explanation.
|
||||
*/
|
||||
#define GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign) \
|
||||
using _eigen_aligned_allocator_trait = void;
|
||||
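A hedged sketch of the intended use; MyFactorData is an illustrative class, and the behavior of gtsam::make_shared is as described in the comment above (dispatch on the trait, see gtsam/base/make_shared.h).

#include <gtsam/base/make_shared.h>
#include <Eigen/Core>

// Illustrative class with a fixed-size, 16-byte-aligned Eigen member.
class MyFactorData {
 public:
  GTSAM_MAKE_ALIGNED_OPERATOR_NEW  // aligned operator new plus the trait alias
 private:
  Eigen::Matrix4d information_;
};

void example() {
  // needs_eigen_aligned_allocator<MyFactorData> resolves to std::true_type, so
  // gtsam::make_shared is expected to allocate through Eigen's aligned allocator.
  auto data = gtsam::make_shared<MyFactorData>();
  (void)data;
}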
|
|
|
@ -162,7 +162,7 @@ private:
|
|||
NeedsToAlign = (sizeof(B) % 16) == 0 || (sizeof(R) % 16) == 0
|
||||
};
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
};
|
||||
|
||||
// Declare this to be both Testable and a Manifold
|
||||
|
|
|
@ -319,7 +319,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
template<class CAMERA>
|
||||
|
|
|
@ -212,7 +212,7 @@ class EssentialMatrix {
|
|||
/// @}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
template<>
|
||||
|
|
|
@ -325,7 +325,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
// manifold traits
|
||||
|
|
|
@ -107,9 +107,9 @@ public:
|
|||
|
||||
// If needed, apply chain rule
|
||||
if (Dpose)
  *Dpose = Dpi_pn * *Dpose;
if (Dpoint)
  *Dpoint = Dpi_pn * *Dpoint;
|
||||
|
||||
return pi;
|
||||
}
|
||||
|
@ -222,7 +222,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// end of class PinholeBaseK
|
||||
|
||||
|
@ -425,7 +425,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// end of class PinholePose
|
||||
|
||||
|
|
|
@ -317,7 +317,7 @@ public:
|
|||
|
||||
public:
|
||||
// Align for Point2, which is either derived from, or a typedef of, Vector2
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
}; // Pose2
|
||||
|
||||
/** specialization for pose2 wedge function (generic template in Lie.h) */
|
||||
|
|
|
@ -355,7 +355,7 @@ public:
|
|||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
// Align if we are using Quaternions
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
#endif
|
||||
};
|
||||
// Pose3 class
|
||||
|
|
|
@ -544,7 +544,7 @@ namespace gtsam {
|
|||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
// only align if quaternion, Matrix3 has no alignment requirements
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -20,8 +20,8 @@
|
|||
|
||||
#include <gtsam/base/Lie.h>
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/base/make_shared.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
|
||||
#include <Eigen/Core>
|
||||
|
||||
#include <iostream> // TODO(frank): how to avoid?
|
||||
|
@ -54,7 +54,7 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
|
|||
using VectorN2 = Eigen::Matrix<double, internal::NSquaredSO(N), 1>;
|
||||
using MatrixDD = Eigen::Matrix<double, dimension, dimension>;
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(true)
|
||||
|
||||
protected:
|
||||
MatrixNN matrix_; ///< Rotation matrix
|
||||
|
@ -292,6 +292,10 @@ class SO : public LieGroup<SO<N>, internal::DimensionSO(N)> {
|
|||
boost::none) const;
|
||||
/// @}
|
||||
|
||||
template <class Archive>
|
||||
friend void save(Archive&, SO&, const unsigned int);
|
||||
template <class Archive>
|
||||
friend void load(Archive&, SO&, const unsigned int);
|
||||
template <class Archive>
|
||||
friend void serialize(Archive&, SO&, const unsigned int);
|
||||
friend class boost::serialization::access;
|
||||
|
@ -329,6 +333,16 @@ template <>
|
|||
SOn LieGroup<SOn, Eigen::Dynamic>::between(const SOn& g, DynamicJacobian H1,
|
||||
DynamicJacobian H2) const;
|
||||
|
||||
/** Serialization function */
|
||||
template<class Archive>
|
||||
void serialize(
|
||||
Archive& ar, SOn& Q,
|
||||
const unsigned int file_version
|
||||
) {
|
||||
Matrix& M = Q.matrix_;
|
||||
ar& M;
|
||||
}
|
||||
|
||||
/*
|
||||
* Define the traits. internal::LieGroup provides both Lie group and Testable
|
||||
*/
|
||||
|
|
|
@ -90,6 +90,8 @@ public:
|
|||
/// Copy assignment
|
||||
Unit3& operator=(const Unit3 & u) {
|
||||
p_ = u.p_;
|
||||
B_ = u.B_;
|
||||
H_B_ = u.H_B_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
|
@ -214,7 +216,7 @@ private:
|
|||
/// @}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
// Define GTSAM traits
|
||||
|
|
|
@ -484,6 +484,15 @@ TEST(Unit3, ErrorBetweenFactor) {
|
|||
}
|
||||
}
|
||||
|
||||
TEST(Unit3, CopyAssign) {
|
||||
Unit3 p{1, 0.2, 0.3};
|
||||
|
||||
EXPECT(p.error(p).isZero());
|
||||
|
||||
p = Unit3{-1, 2, 8};
|
||||
EXPECT(p.error(p).isZero());
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(actualH, Serialization) {
|
||||
Unit3 p(0, 1, 0);
|
||||
|
|
|
@ -215,7 +215,7 @@ struct CameraProjectionMatrix {
|
|||
private:
|
||||
const Matrix3 K_;
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
|
@ -139,7 +139,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/// traits
|
||||
|
@ -219,7 +219,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/// traits
|
||||
|
|
|
@ -100,7 +100,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
|
||||
|
@ -210,7 +210,7 @@ public:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -332,7 +332,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// class CombinedImuFactor
|
||||
|
||||
|
|
|
@ -171,7 +171,7 @@ private:
|
|||
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
/// @}
|
||||
|
||||
}; // ConstantBias class
|
||||
|
|
|
@ -69,7 +69,7 @@ struct GTSAM_EXPORT PreintegratedRotationParams {
|
|||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
// Align if we are using Quaternions
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
#endif
|
||||
};
|
||||
|
||||
|
@ -182,7 +182,7 @@ class GTSAM_EXPORT PreintegratedRotation {
|
|||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
// Align if we are using Quaternions
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -214,7 +214,7 @@ class GTSAM_EXPORT PreintegrationBase {
|
|||
#endif
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
} /// namespace gtsam
|
||||
|
|
|
@ -84,7 +84,7 @@ protected:
|
|||
#ifdef GTSAM_USE_QUATERNIONS
|
||||
// Align if we are using Quaternions
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
#endif
|
||||
};
|
||||
|
||||
|
|
|
@ -141,7 +141,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
} /// namespace gtsam
|
||||
|
|
|
@ -209,7 +209,7 @@ private:
|
|||
// Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
|
||||
enum { NeedsToAlign = (sizeof(T) % 16) == 0 };
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
};
|
||||
// ExpressionFactor
|
||||
|
||||
|
|
|
@ -175,7 +175,7 @@ public:
|
|||
|
||||
/// @}
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
private:
|
||||
|
||||
|
@ -265,7 +265,7 @@ public:
|
|||
traits<X>::Print(value_, "Value");
|
||||
}
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
private:
|
||||
|
||||
|
@ -331,7 +331,7 @@ public:
|
|||
return traits<X>::Local(x1,x2);
|
||||
}
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
private:
|
||||
|
||||
|
|
|
@ -114,7 +114,7 @@ namespace gtsam {
|
|||
// Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
|
||||
enum { NeedsToAlign = (sizeof(T) % 16) == 0 };
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
};
|
||||
|
||||
} /// namespace gtsam
|
||||
|
|
|
@ -169,6 +169,12 @@ class ExecutionTrace {
|
|||
content.ptr->reverseAD2(dTdA, jacobians);
|
||||
}
|
||||
|
||||
~ExecutionTrace() {
|
||||
if (kind == Function) {
|
||||
content.ptr->~CallRecord<Dim>();
|
||||
}
|
||||
}
|
||||
|
||||
/// Define type so we can apply it as a meta-function
|
||||
typedef ExecutionTrace<T> type;
|
||||
};
|
||||
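The explicit destructor call added above reflects that function records are placement-new'd into a pre-allocated buffer, so only the destructor may run, never operator delete. A standalone sketch of that pattern (Record is a hypothetical type, not GTSAM code):

#include <new>  // placement new

struct Record {
  ~Record() {}  // releases whatever the record owns
};

void sketch() {
  alignas(Record) unsigned char buffer[sizeof(Record)];
  Record* r = new (buffer) Record();  // placement new: no heap allocation
  // ... use r ...
  r->~Record();  // destroy explicitly; the buffer itself is never delete'd
}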
|
|
|
@ -150,7 +150,7 @@ public:
|
|||
return constant_;
|
||||
}
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
* @brief unit tests for Expression internals
|
||||
*/
|
||||
|
||||
#include <gtsam/nonlinear/internal/CallRecord.h>
|
||||
#include <gtsam/nonlinear/internal/ExecutionTrace.h>
|
||||
#include <gtsam/geometry/Point2.h>
|
||||
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
#pragma once
|
||||
|
||||
/**
|
||||
* @file TranslationFactor.h
|
||||
* @author Frank Dellaert
|
||||
* @date March 2020
|
||||
* @brief Binary factor for a relative translation direction measurement.
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Binary factor for a relative translation direction measurement
|
||||
* w_aZb. The measurement equation is
|
||||
* w_aZb = Unit3(Tb - Ta)
|
||||
* i.e., w_aZb is the translation direction from frame A to B, in world
|
||||
* coordinates. Although Unit3 instances live on a manifold, following
|
||||
* Wilson14eccv_1DSfM.pdf, we compute the *chordal distance* error in the
|
||||
* ambient world coordinate frame:
|
||||
* normalized(Tb - Ta) - w_aZb.point3()
|
||||
*
|
||||
* @addtogroup SFM
|
||||
*/
|
||||
class TranslationFactor : public NoiseModelFactor2<Point3, Point3> {
|
||||
private:
|
||||
typedef NoiseModelFactor2<Point3, Point3> Base;
|
||||
Point3 measured_w_aZb_;
|
||||
|
||||
public:
|
||||
/// default constructor
|
||||
TranslationFactor() {}
|
||||
|
||||
TranslationFactor(Key a, Key b, const Unit3& w_aZb,
|
||||
const SharedNoiseModel& noiseModel)
|
||||
: Base(noiseModel, a, b), measured_w_aZb_(w_aZb.point3()) {}
|
||||
|
||||
/**
|
||||
* @brief Calculate error: (normalized(Tb - Ta) - measurement)
|
||||
* where Tb and Ta are Point3 translations and measurement is
|
||||
* the Unit3 translation direction from a to b.
|
||||
*
|
||||
* @param Ta translation for key a
|
||||
* @param Tb translation for key b
|
||||
* @param H1 optional jacobian in Ta
|
||||
* @param H2 optional jacobian in Tb
|
||||
* @return Vector
|
||||
*/
|
||||
Vector evaluateError(
|
||||
const Point3& Ta, const Point3& Tb,
|
||||
boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const override {
|
||||
const Point3 dir = Tb - Ta;
|
||||
Matrix33 H_predicted_dir;
|
||||
const Point3 predicted = normalize(dir, H1 || H2 ? &H_predicted_dir : nullptr);
|
||||
if (H1) *H1 = -H_predicted_dir;
|
||||
if (H2) *H2 = H_predicted_dir;
|
||||
return predicted - measured_w_aZb_;
|
||||
}
|
||||
|
||||
private:
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& boost::serialization::make_nvp(
|
||||
"Base", boost::serialization::base_object<Base>(*this));
|
||||
}
|
||||
}; // \ TranslationFactor
|
||||
} // namespace gtsam
|
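A quick numeric sanity check of the chordal error above, a sketch that mirrors the unit tests further below and assumes `using namespace gtsam`:

// Inside a test body:
// With Ta = (0,1,1) and Tb = (0,2,2), Tb - Ta = (0,1,1) normalizes to
// (0, 1/sqrt(2), 1/sqrt(2)); against the measurement (1,0,0) the chordal error is
// (-1, 1/sqrt(2), 1/sqrt(2)).
TranslationFactor factor(1, 2, Unit3(1, 0, 0), noiseModel::Isotropic::Sigma(3, 0.05));
Vector error = factor.evaluateError(Point3(0, 1, 1), Point3(0, 2, 2));
// error is approximately (-1, 0.7071, 0.7071)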
|
@ -0,0 +1,103 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file TranslationRecovery.cpp
|
||||
* @author Frank Dellaert
|
||||
* @date March 2020
|
||||
* @brief Recovering translations when rotations are given.
|
||||
*/
|
||||
|
||||
#include <gtsam/sfm/TranslationRecovery.h>
|
||||
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/linear/NoiseModel.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
#include <gtsam/sfm/TranslationFactor.h>
|
||||
#include <gtsam/slam/PriorFactor.h>
|
||||
|
||||
using namespace gtsam;
|
||||
using namespace std;
|
||||
|
||||
NonlinearFactorGraph TranslationRecovery::buildGraph() const {
|
||||
auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01);
|
||||
NonlinearFactorGraph graph;
|
||||
|
||||
// Add all relative translation edges
|
||||
for (auto edge : relativeTranslations_) {
|
||||
Key a, b;
|
||||
tie(a, b) = edge.first;
|
||||
const Unit3 w_aZb = edge.second;
|
||||
graph.emplace_shared<TranslationFactor>(a, b, w_aZb, noiseModel);
|
||||
}
|
||||
|
||||
return graph;
|
||||
}
|
||||
|
||||
void TranslationRecovery::addPrior(const double scale,
|
||||
NonlinearFactorGraph* graph) const {
|
||||
auto noiseModel = noiseModel::Isotropic::Sigma(3, 0.01);
|
||||
auto edge = relativeTranslations_.begin();
|
||||
Key a, b;
|
||||
tie(a, b) = edge->first;
|
||||
const Unit3 w_aZb = edge->second;
|
||||
graph->emplace_shared<PriorFactor<Point3> >(a, Point3(0, 0, 0), noiseModel);
|
||||
graph->emplace_shared<PriorFactor<Point3> >(b, scale * w_aZb.point3(),
|
||||
noiseModel);
|
||||
}
|
||||
|
||||
Values TranslationRecovery::initalizeRandomly() const {
|
||||
// Create a lambda expression that checks whether value exists and randomly
|
||||
// initializes if not.
|
||||
Values initial;
|
||||
auto insert = [&initial](Key j) {
|
||||
if (!initial.exists(j)) {
|
||||
initial.insert<Point3>(j, Vector3::Random());
|
||||
}
|
||||
};
|
||||
|
||||
// Loop over measurements and add a random translation
|
||||
for (auto edge : relativeTranslations_) {
|
||||
Key a, b;
|
||||
tie(a, b) = edge.first;
|
||||
insert(a);
|
||||
insert(b);
|
||||
}
|
||||
return initial;
|
||||
}
|
||||
|
||||
Values TranslationRecovery::run(const double scale) const {
|
||||
auto graph = buildGraph();
|
||||
addPrior(scale, &graph);
|
||||
const Values initial = initalizeRandomly();
|
||||
LevenbergMarquardtOptimizer lm(graph, initial, params_);
|
||||
Values result = lm.optimize();
|
||||
return result;
|
||||
}
|
||||
|
||||
TranslationRecovery::TranslationEdges TranslationRecovery::SimulateMeasurements(
|
||||
const Values& poses, const vector<KeyPair>& edges) {
|
||||
TranslationEdges relativeTranslations;
|
||||
for (auto edge : edges) {
|
||||
Key a, b;
|
||||
tie(a, b) = edge;
|
||||
const Pose3 wTa = poses.at<Pose3>(a), wTb = poses.at<Pose3>(b);
|
||||
const Point3 Ta = wTa.translation(), Tb = wTb.translation();
|
||||
const Unit3 w_aZb(Tb - Ta);
|
||||
relativeTranslations[edge] = w_aZb;
|
||||
}
|
||||
return relativeTranslations;
|
||||
}
|
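A hedged end-to-end sketch of using this class, with names as declared in the accompanying header; the keys and poses are illustrative only.

#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/sfm/TranslationRecovery.h>

#include <vector>

int main() {
  using namespace gtsam;

  // Ground-truth poses; only the translations matter for the simulated directions.
  Values poses;
  poses.insert(0, Pose3(Rot3(), Point3(0, 0, 0)));
  poses.insert(1, Pose3(Rot3(), Point3(2, 0, 0)));
  poses.insert(2, Pose3(Rot3(), Point3(2, 2, 0)));

  // Edges for which relative translation directions w_aZb are simulated.
  std::vector<TranslationRecovery::KeyPair> edges = {{0, 1}, {1, 2}, {0, 2}};
  auto relativeTranslations = TranslationRecovery::SimulateMeasurements(poses, edges);

  // Recover translations; the scale of the first edge (here 2.0) fixes the gauge.
  TranslationRecovery recovery(relativeTranslations);
  Values result = recovery.run(2.0);
  // result.at<Point3>(k) should match the ground-truth translations above.
  return 0;
}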
|
@ -0,0 +1,114 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file TranslationRecovery.h
|
||||
* @author Frank Dellaert
|
||||
* @date March 2020
|
||||
* @brief Recovering translations when rotations are given.
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
|
||||
#include <map>
|
||||
#include <utility>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
// Set up an optimization problem for the unknown translations Ti in the world
|
||||
// coordinate frame, given the known camera attitudes wRi with respect to the
|
||||
// world frame, and a set of (noisy) translation directions of type Unit3,
|
||||
// w_aZb. The measurement equation is
|
||||
// w_aZb = Unit3(Tb - Ta) (1)
|
||||
// i.e., w_aZb is the translation direction from frame A to B, in world
|
||||
// coordinates. Although Unit3 instances live on a manifold, following
|
||||
// Wilson14eccv_1DSfM.pdf, we compute the *chordal distance* error in the
|
||||
// ambient world coordinate frame.
|
||||
//
|
||||
// It is clear that we cannot recover the scale, nor the absolute position,
|
||||
// so the gauge freedom in this case is 3 + 1 = 4. We fix these by fixing
|
||||
// the translations Ta and Tb associated with the first measurement w_aZb,
|
||||
// clamping them to their initial values as given to this method. If no initial
|
||||
// values are given, we use the origin for Ta and set Tb to make (1) come
|
||||
// through, i.e.,
|
||||
// Tb = s * wRa * Point3(w_aZb) (2)
|
||||
// where s is an arbitrary scale that can be supplied, default 1.0. Hence, two
|
||||
// versions are supplied below corresponding to whether we have initial values
|
||||
// or not.
|
||||
class TranslationRecovery {
|
||||
public:
|
||||
using KeyPair = std::pair<Key, Key>;
|
||||
using TranslationEdges = std::map<KeyPair, Unit3>;
|
||||
|
||||
private:
|
||||
TranslationEdges relativeTranslations_;
|
||||
LevenbergMarquardtParams params_;
|
||||
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a new Translation Recovery object
|
||||
*
|
||||
* @param relativeTranslations the relative translations, in world coordinate
|
||||
* frames, indexed in a map by a pair of Pose keys.
|
||||
* @param lmParams (optional) gtsam::LevenbergMarquardtParams that can be
|
||||
* used to modify the parameters for the LM optimizer. By default, uses the
|
||||
* default LM parameters.
|
||||
*/
|
||||
TranslationRecovery(const TranslationEdges& relativeTranslations,
|
||||
const LevenbergMarquardtParams& lmParams = LevenbergMarquardtParams())
|
||||
: relativeTranslations_(relativeTranslations), params_(lmParams) {
|
||||
params_.setVerbosityLM("Summary");
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Build the factor graph to do the optimization.
|
||||
*
|
||||
* @return NonlinearFactorGraph
|
||||
*/
|
||||
NonlinearFactorGraph buildGraph() const;
|
||||
|
||||
/**
|
||||
* @brief Add priors on endpoints of first measurement edge.
|
||||
*
|
||||
* @param scale scale for first relative translation which fixes gauge.
|
||||
* @param graph factor graph to which prior is added.
|
||||
*/
|
||||
void addPrior(const double scale, NonlinearFactorGraph* graph) const;
|
||||
|
||||
/**
|
||||
* @brief Create random initial translations.
|
||||
*
|
||||
* @return Values
|
||||
*/
|
||||
Values initalizeRandomly() const;
|
||||
|
||||
/**
|
||||
* @brief Build and optimize factor graph.
|
||||
*
|
||||
* @param scale scale for first relative translation which fixes gauge.
|
||||
* @return Values
|
||||
*/
|
||||
Values run(const double scale = 1.0) const;
|
||||
|
||||
/**
|
||||
* @brief Simulate translation direction measurements
|
||||
*
|
||||
* @param poses SE(3) ground truth poses stored as Values
|
||||
* @param edges pairs (a,b) for which a measurement w_aZb will be generated.
|
||||
* @return TranslationEdges map from a KeyPair to the simulated Unit3
|
||||
* translation direction measurement between the cameras in KeyPair.
|
||||
*/
|
||||
static TranslationEdges SimulateMeasurements(
|
||||
const Values& poses, const std::vector<KeyPair>& edges);
|
||||
};
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,108 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file testTranslationFactor.cpp
|
||||
* @brief Unit tests for TranslationFactor Class
|
||||
* @author Frank Dellaert
|
||||
* @date March 2020
|
||||
*/
|
||||
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/Point3.h>
|
||||
#include <gtsam/sfm/TranslationFactor.h>
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
// Create a noise model for the chordal error
|
||||
static SharedNoiseModel model(noiseModel::Isotropic::Sigma(3, 0.05));
|
||||
|
||||
// Keys are deliberately *not* in sorted order to test that case.
|
||||
static const Key kKey1(2), kKey2(1);
|
||||
static const Unit3 kMeasured(1, 0, 0);
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(TranslationFactor, Constructor) {
|
||||
TranslationFactor factor(kKey1, kKey2, kMeasured, model);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(TranslationFactor, ZeroError) {
|
||||
// Create a factor
|
||||
TranslationFactor factor(kKey1, kKey2, kMeasured, model);
|
||||
|
||||
// Set the linearization
|
||||
Point3 T1(1, 0, 0), T2(2, 0, 0);
|
||||
|
||||
// Use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(T1, T2));
|
||||
|
||||
// Verify we get the expected error
|
||||
Vector expected = (Vector3() << 0, 0, 0).finished();
|
||||
EXPECT(assert_equal(expected, actualError, 1e-9));
|
||||
|
||||
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(TranslationFactor, NonZeroError) {
|
||||
// create a factor
|
||||
TranslationFactor factor(kKey1, kKey2, kMeasured, model);
|
||||
|
||||
// set the linearization
|
||||
Point3 T1(0, 1, 1), T2(0, 2, 2);
|
||||
|
||||
// use the factor to calculate the error
|
||||
Vector actualError(factor.evaluateError(T1, T2));
|
||||
|
||||
// verify we get the expected error
|
||||
Vector expected = (Vector3() << -1, 1/sqrt(2), 1/sqrt(2)).finished();
|
||||
EXPECT(assert_equal(expected, actualError, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector factorError(const Point3& T1, const Point3& T2,
|
||||
const TranslationFactor& factor) {
|
||||
return factor.evaluateError(T1, T2);
|
||||
}
|
||||
|
||||
TEST(TranslationFactor, Jacobian) {
|
||||
// Create a factor
|
||||
TranslationFactor factor(kKey1, kKey2, kMeasured, model);
|
||||
|
||||
// Set the linearization
|
||||
Point3 T1(1, 0, 0), T2(2, 0, 0);
|
||||
|
||||
// Use the factor to calculate the Jacobians
|
||||
Matrix H1Actual, H2Actual;
|
||||
factor.evaluateError(T1, T2, H1Actual, H2Actual);
|
||||
|
||||
// Use numerical derivatives to calculate the Jacobians
|
||||
Matrix H1Expected, H2Expected;
|
||||
H1Expected = numericalDerivative11<Vector, Point3>(
|
||||
boost::bind(&factorError, _1, T2, factor), T1);
|
||||
H2Expected = numericalDerivative11<Vector, Point3>(
|
||||
boost::bind(&factorError, T1, _1, factor), T2);
|
||||
|
||||
// Verify the Jacobians are correct
|
||||
EXPECT(assert_equal(H1Expected, H1Actual, 1e-9));
|
||||
EXPECT(assert_equal(H2Expected, H2Actual, 1e-9));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
|
@ -126,7 +126,7 @@ namespace gtsam {
|
|||
// Alignment, see https://eigen.tuxfamily.org/dox/group__TopicStructHavingEigenMembers.html
|
||||
enum { NeedsToAlign = (sizeof(VALUE) % 16) == 0 };
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW_IF(NeedsToAlign)
|
||||
}; // \class BetweenFactor
|
||||
|
||||
/// traits
|
||||
|
|
|
@ -105,7 +105,7 @@ private:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// \class EssentialMatrixConstraint
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ public:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -201,7 +201,7 @@ public:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// EssentialMatrixFactor2
|
||||
|
||||
|
@ -286,7 +286,7 @@ public:
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
// EssentialMatrixFactor3
|
||||
|
||||
|
|
|
@ -0,0 +1,117 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file FrobeniusFactor.cpp
|
||||
* @date March 2019
|
||||
* @author Frank Dellaert
|
||||
* @brief Various factors that minimize some Frobenius norm
|
||||
*/
|
||||
|
||||
#include <gtsam/slam/FrobeniusFactor.h>
|
||||
|
||||
#include <gtsam/base/timing.h>
|
||||
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
//******************************************************************************
|
||||
boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel(
|
||||
const SharedNoiseModel& model, size_t d, bool defaultToUnit) {
|
||||
double sigma = 1.0;
|
||||
if (model != nullptr) {
|
||||
if (model->dim() != 6) {
|
||||
if (!defaultToUnit)
|
||||
throw std::runtime_error("Can only convert Pose3 noise models");
|
||||
} else {
|
||||
auto sigmas = model->sigmas().head(3).eval();
|
||||
if (sigmas(1) != sigmas(0) || sigmas(2) != sigmas(0)) {
|
||||
if (!defaultToUnit)
|
||||
throw std::runtime_error("Can only convert isotropic rotation noise");
|
||||
} else {
|
||||
sigma = sigmas(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
return noiseModel::Isotropic::Sigma(d, sigma);
|
||||
}
|
||||
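For instance, the conversion behaves as in the following sketch (illustrative values; the function and ordering conventions are those declared in FrobeniusFactor.h):

#include <gtsam/slam/FrobeniusFactor.h>

void example() {
  // A 6-d Pose3 BetweenFactor noise model whose three rotation sigmas are equal (0.1)...
  auto pose3Model = gtsam::noiseModel::Diagonal::Sigmas(
      (gtsam::Vector(6) << 0.1, 0.1, 0.1, 0.3, 0.3, 0.3).finished());

  // ...converts to a 12-dimensional isotropic model (d = 3 * p with p = 4),
  // with sigma taken from the isotropic rotation block.
  auto converted = gtsam::ConvertPose3NoiseModel(pose3Model, 12);
  // converted->dim() == 12 and converted->sigma() == 0.1
}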
|
||||
//******************************************************************************
|
||||
FrobeniusWormholeFactor::FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12,
|
||||
size_t p,
|
||||
const SharedNoiseModel& model)
|
||||
: NoiseModelFactor2<SOn, SOn>(ConvertPose3NoiseModel(model, p * 3), j1, j2),
|
||||
M_(R12.matrix()), // 3*3 in all cases
|
||||
p_(p), // 4 for SO(4)
|
||||
pp_(p * p), // 16 for SO(4)
|
||||
dimension_(SOn::Dimension(p)), // 6 for SO(4)
|
||||
G_(pp_, dimension_) // 16*6 for SO(4)
|
||||
{
|
||||
// Calculate G matrix of vectorized generators
|
||||
Matrix Z = Matrix::Zero(p, p);
|
||||
for (size_t j = 0; j < dimension_; j++) {
|
||||
const auto X = SOn::Hat(Eigen::VectorXd::Unit(dimension_, j));
|
||||
G_.col(j) = Eigen::Map<const Matrix>(X.data(), pp_, 1);
|
||||
}
|
||||
assert(noiseModel()->dim() == 3 * p_);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
Vector FrobeniusWormholeFactor::evaluateError(
|
||||
const SOn& Q1, const SOn& Q2, boost::optional<Matrix&> H1,
|
||||
boost::optional<Matrix&> H2) const {
|
||||
gttic(FrobeniusWormholeFactorP_evaluateError);
|
||||
|
||||
const Matrix& M1 = Q1.matrix();
|
||||
const Matrix& M2 = Q2.matrix();
|
||||
assert(M1.rows() == p_ && M2.rows() == p_);
|
||||
|
||||
const size_t dim = 3 * p_; // Stiefel manifold dimension
|
||||
Vector fQ2(dim), hQ1(dim);
|
||||
|
||||
// Vectorize and extract only d leftmost columns, i.e. vec(M2*P)
|
||||
fQ2 << Eigen::Map<const Matrix>(M2.data(), dim, 1);
|
||||
|
||||
// Vectorize M1*P*R12
|
||||
const Matrix Q1PR12 = M1.leftCols<3>() * M_;
|
||||
hQ1 << Eigen::Map<const Matrix>(Q1PR12.data(), dim, 1);
|
||||
|
||||
// If asked, calculate Jacobian as (M \otimes M1) * G
|
||||
if (H1) {
|
||||
const size_t p2 = 2 * p_;
|
||||
Matrix RPxQ = Matrix::Zero(dim, pp_);
|
||||
RPxQ.block(0, 0, p_, dim) << M1 * M_(0, 0), M1 * M_(1, 0), M1 * M_(2, 0);
|
||||
RPxQ.block(p_, 0, p_, dim) << M1 * M_(0, 1), M1 * M_(1, 1), M1 * M_(2, 1);
|
||||
RPxQ.block(p2, 0, p_, dim) << M1 * M_(0, 2), M1 * M_(1, 2), M1 * M_(2, 2);
|
||||
*H1 = -RPxQ * G_;
|
||||
}
|
||||
if (H2) {
|
||||
const size_t p2 = 2 * p_;
|
||||
Matrix PxQ = Matrix::Zero(dim, pp_);
|
||||
PxQ.block(0, 0, p_, p_) = M2;
|
||||
PxQ.block(p_, p_, p_, p_) = M2;
|
||||
PxQ.block(p2, p2, p_, p_) = M2;
|
||||
*H2 = PxQ * G_;
|
||||
}
|
||||
|
||||
return fQ2 - hQ1;
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
|
||||
} // namespace gtsam
|
|
@ -0,0 +1,145 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file FrobeniusFactor.h
|
||||
* @date March 2019
|
||||
* @author Frank Dellaert
|
||||
* @brief Various factors that minimize some Frobenius norm
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/geometry/Rot3.h>
|
||||
#include <gtsam/geometry/SOn.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* When creating (any) FrobeniusFactor we convert a 6-dimensional Pose3
|
||||
* BetweenFactor noise model into a 9- or 16-dimensional isotropic noise
|
||||
* model used to weight the Frobenius norm. If the noise model passed is
|
||||
* null we return a Dim-dimensional isotropic noise model with sigma=1.0. If
|
||||
* not, we check if the 3-dimensional noise model on rotations is
|
||||
* isotropic. If it is, we extend to 'Dim' dimensions, otherwise we throw an
|
||||
* error. If defaultToUnit == false, throws an exception on unexpected input.
|
||||
*/
|
||||
GTSAM_EXPORT boost::shared_ptr<noiseModel::Isotropic> ConvertPose3NoiseModel(
|
||||
const SharedNoiseModel& model, size_t d, bool defaultToUnit = true);
|
||||
|
||||
/**
|
||||
* FrobeniusPrior calculates the Frobenius norm between a given matrix and an
|
||||
* element of SO(3) or SO(4).
|
||||
*/
|
||||
template <class Rot>
|
||||
class FrobeniusPrior : public NoiseModelFactor1<Rot> {
|
||||
enum { Dim = Rot::VectorN2::RowsAtCompileTime };
|
||||
using MatrixNN = typename Rot::MatrixNN;
|
||||
Eigen::Matrix<double, Dim, 1> vecM_; ///< vectorized matrix to approximate
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
FrobeniusPrior(Key j, const MatrixNN& M,
|
||||
const SharedNoiseModel& model = nullptr)
|
||||
: NoiseModelFactor1<Rot>(ConvertPose3NoiseModel(model, Dim), j) {
|
||||
vecM_ << Eigen::Map<const Matrix>(M.data(), Dim, 1);
|
||||
}
|
||||
|
||||
/// Error is just Frobenius norm between Rot element and vectorized matrix M.
|
||||
Vector evaluateError(const Rot& R,
|
||||
boost::optional<Matrix&> H = boost::none) const {
|
||||
return R.vec(H) - vecM_; // Jacobian is computed only when needed.
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* FrobeniusFactor calculates the Frobenius norm between rotation matrices.
|
||||
* The template argument can be any fixed-size SO<N>.
|
||||
*/
|
||||
template <class Rot>
|
||||
class FrobeniusFactor : public NoiseModelFactor2<Rot, Rot> {
|
||||
enum { Dim = Rot::VectorN2::RowsAtCompileTime };
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
FrobeniusFactor(Key j1, Key j2, const SharedNoiseModel& model = nullptr)
|
||||
: NoiseModelFactor2<Rot, Rot>(ConvertPose3NoiseModel(model, Dim), j1,
|
||||
j2) {}
|
||||
|
||||
/// Error is just Frobenius norm between rotation matrices.
|
||||
Vector evaluateError(const Rot& R1, const Rot& R2,
|
||||
boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const {
|
||||
Vector error = R2.vec(H2) - R1.vec(H1);
|
||||
if (H1) *H1 = -*H1;
|
||||
return error;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* FrobeniusBetweenFactor is a BetweenFactor that evaluates the Frobenius norm
|
||||
* of the rotation error between measured and predicted (rather than the
|
||||
* Logmap of the error). This factor is only defined for fixed-dimension types,
|
||||
* and in fact only SO3 and SO4 really work, as we need SO<N>::AdjointMap.
|
||||
*/
|
||||
template <class Rot>
|
||||
class FrobeniusBetweenFactor : public NoiseModelFactor2<Rot, Rot> {
|
||||
Rot R12_; ///< measured rotation between R1 and R2
|
||||
Eigen::Matrix<double, Rot::dimension, Rot::dimension>
|
||||
R2hat_H_R1_; ///< fixed derivative of R2hat wrpt R1
|
||||
enum { Dim = Rot::VectorN2::RowsAtCompileTime };
|
||||
|
||||
public:
|
||||
/// Constructor
|
||||
FrobeniusBetweenFactor(Key j1, Key j2, const Rot& R12,
|
||||
const SharedNoiseModel& model = nullptr)
|
||||
: NoiseModelFactor2<Rot, Rot>(
|
||||
ConvertPose3NoiseModel(model, Dim), j1, j2),
|
||||
R12_(R12),
|
||||
R2hat_H_R1_(R12.inverse().AdjointMap()) {}
|
||||
|
||||
/// Error is Frobenius norm between R1*R12 and R2.
|
||||
Vector evaluateError(const Rot& R1, const Rot& R2,
|
||||
boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const {
|
||||
const Rot R2hat = R1.compose(R12_);
|
||||
Eigen::Matrix<double, Dim, Rot::dimension> vec_H_R2hat;
|
||||
Vector error = R2.vec(H2) - R2hat.vec(H1 ? &vec_H_R2hat : nullptr);
|
||||
if (H1) *H1 = -vec_H_R2hat * R2hat_H_R1_;
|
||||
return error;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* FrobeniusWormholeFactor is a BetweenFactor that moves in SO(p), but will
|
||||
* land on the SO(3) sub-manifold of SO(p) at the global minimum. It projects
|
||||
* the SO(p) matrices down to a Stiefel manifold of p*d matrices.
|
||||
* TODO(frank): template on D=2 or 3
|
||||
*/
|
||||
class GTSAM_EXPORT FrobeniusWormholeFactor : public NoiseModelFactor2<SOn, SOn> {
|
||||
Matrix M_; ///< measured rotation between R1 and R2
|
||||
size_t p_, pp_, dimension_; ///< dimensionality constants
|
||||
Matrix G_; ///< matrix of vectorized generators
|
||||
|
||||
public:
|
||||
/// Constructor. Note we convert to 3*p-dimensional noise model.
|
||||
FrobeniusWormholeFactor(Key j1, Key j2, const Rot3& R12, size_t p = 4,
|
||||
const SharedNoiseModel& model = nullptr);
|
||||
|
||||
/// Error is Frobenius norm between Q1*P*R12 and Q2*P, where P=[I_3x3;0]
|
||||
/// projects down from SO(p) to the Stiefel manifold of px3 matrices.
|
||||
Vector evaluateError(const SOn& Q1, const SOn& Q2,
|
||||
boost::optional<Matrix&> H1 = boost::none,
|
||||
boost::optional<Matrix&> H2 = boost::none) const;
|
||||
};
|
||||
|
||||
} // namespace gtsam
|
|
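A minimal sketch of plugging these factors into a graph; the graph, keys, and measured rotation are illustrative, and only SO3/SO4 are expected to work as noted above.

#include <gtsam/geometry/SO3.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/FrobeniusFactor.h>

void example() {
  using namespace gtsam;
  NonlinearFactorGraph graph;
  // Anchor the first rotation at identity and relate the second to it by a measured R12.
  graph.emplace_shared<FrobeniusPrior<SO3>>(0, SO3().matrix());
  graph.emplace_shared<FrobeniusBetweenFactor<SO3>>(0, 1, SO3::Expmap(Vector3(0.1, 0.2, 0.3)));

  Values initial;
  initial.insert(0, SO3());
  initial.insert(1, SO3());
  Values result = GaussNewtonOptimizer(graph, initial).optimize();
  // result.at<SO3>(1) should end up close to the measured R12.
}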
@ -189,7 +189,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
|
||||
/// traits
|
||||
|
|
|
@ -113,7 +113,7 @@ public:
|
|||
return error;
|
||||
}
|
||||
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
} // namespace gtsam
|
||||
|
||||
|
|
|
@ -81,7 +81,7 @@ protected:
|
|||
mutable FBlocks Fs;
|
||||
|
||||
public:
|
||||
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
@ -207,10 +207,18 @@ protected:
|
|||
Vector ue = cameras.reprojectionError(point, measured_, Fs, E);
|
||||
if (body_P_sensor_ && Fs) {
|
||||
const Pose3 sensor_P_body = body_P_sensor_->inverse();
|
||||
constexpr int camera_dim = traits<CAMERA>::dimension;
|
||||
constexpr int pose_dim = traits<Pose3>::dimension;
|
||||
|
||||
for (size_t i = 0; i < Fs->size(); i++) {
|
||||
const Pose3 world_P_body = cameras[i].pose() * sensor_P_body;
Eigen::Matrix<double, camera_dim, camera_dim> J;
|
||||
J.setZero();
|
||||
Eigen::Matrix<double, pose_dim, pose_dim> H;
|
||||
// Call compose to compute Jacobian for camera extrinsics
|
||||
world_P_body.compose(*body_P_sensor_, H);
|
||||
// Assign extrinsics part of the Jacobian
|
||||
J.template block<pose_dim, pose_dim>(0, 0) = H;
|
||||
Fs->at(i) = Fs->at(i) * J;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -442,11 +442,11 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate,
|
|||
auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
|
||||
if (!p) continue;
|
||||
const Pose3& pose = p->value();
|
||||
|
||||
const Point3 t = pose.translation();
|
||||
const auto q = pose.rotation().toQuaternion();
|
||||
stream << "VERTEX_SE3:QUAT " << key_value.key << " " << t.x() << " "
|
||||
<< t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " "
|
||||
<< q.z() << " " << q.w() << endl;
|
||||
}
|
||||
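For reference, a pose written by this loop produces a g2o line of the following shape (illustrative values, identity rotation):

VERTEX_SE3:QUAT 0 1.0 2.0 3.0 0.0 0.0 0.0 1.0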
|
||||
// save edges (2D or 3D)
|
||||
|
@ -486,13 +486,12 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate,
|
|||
throw invalid_argument("writeG2o: invalid noise model!");
|
||||
}
|
||||
Matrix Info = gaussianModel->R().transpose() * gaussianModel->R();
|
||||
|
||||
const Pose3 pose3D = factor3D->measured();
|
||||
const Point3 p = pose3D.translation();
|
||||
const auto q = pose3D.rotation().toQuaternion();
|
||||
stream << "EDGE_SE3:QUAT " << factor3D->key1() << " " << factor3D->key2()
|
||||
<< " " << p.x() << " " << p.y() << " " << p.z() << " " << q.x()
|
||||
<< " " << q.y() << " " << q.z() << " " << q.w();
|
||||
|
||||
Matrix InfoG2o = I_6x6;
|
||||
InfoG2o.block(0,0,3,3) = Info.block(3,3,3,3); // cov translation
|
||||
|
@ -511,6 +510,11 @@ void writeG2o(const NonlinearFactorGraph& graph, const Values& estimate,
|
|||
stream.close();
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
static Rot3 NormalizedRot3(double w, double x, double y, double z) {
|
||||
const double norm = sqrt(w * w + x * x + y * y + z * z), f = 1.0 / norm;
|
||||
return Rot3::Quaternion(f * w, f * x, f * y, f * z);
|
||||
}
|
||||
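The renormalization guards against quaternions stored with limited precision in g2o files; a small sketch of the effect (values illustrative):

// Quaternion components as read from a g2o line, rounded to a few decimals
// and therefore not exactly unit norm.
const double qw = 0.7071, qx = 0.0, qy = 0.0, qz = 0.7071;
const gtsam::Rot3 R = NormalizedRot3(qw, qx, qy, qz);
// R.matrix().determinant() is 1.0 to machine precision, which the
// readG2oCheckDeterminants test further below relies on.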
/* ************************************************************************* */
|
||||
std::map<Key, Pose3> parse3DPoses(const string& filename) {
|
||||
ifstream is(filename.c_str());
|
||||
|
@ -535,14 +539,15 @@ std::map<Key, Pose3> parse3DPoses(const string& filename) {
|
|||
Key id;
|
||||
double x, y, z, qx, qy, qz, qw;
|
||||
ls >> id >> x >> y >> z >> qx >> qy >> qz >> qw;
|
||||
poses.emplace(id, Pose3(Rot3::Quaternion(qw, qx, qy, qz), {x, y, z}));
|
||||
poses.emplace(id, Pose3(NormalizedRot3(qw, qx, qy, qz), {x, y, z}));
|
||||
}
|
||||
}
|
||||
return poses;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
BetweenFactorPose3s parse3DFactors(const string& filename,
|
||||
BetweenFactorPose3s parse3DFactors(
|
||||
const string& filename,
|
||||
const noiseModel::Diagonal::shared_ptr& corruptingNoise) {
|
||||
ifstream is(filename.c_str());
|
||||
if (!is) throw invalid_argument("parse3DFactors: can not find file " + filename);
|
||||
|
@ -592,7 +597,7 @@ BetweenFactorPose3s parse3DFactors(const string& filename,
|
|||
mgtsam.block<3, 3>(3, 0) = m.block<3, 3>(3, 0); // off diagonal
|
||||
|
||||
SharedNoiseModel model = noiseModel::Gaussian::Information(mgtsam);
|
||||
auto R12 = Rot3::Quaternion(qw, qx, qy, qz);
|
||||
auto R12 = NormalizedRot3(qw, qx, qy, qz);
|
||||
if (sampler) {
|
||||
R12 = R12.retract(sampler->sample());
|
||||
}
|
||||
|
|
|
@@ -122,45 +122,6 @@ TEST( dataSet, Balbianello)
  EXPECT(assert_equal(expected,actual,1));
}

/* ************************************************************************* */
TEST( dataSet, readG2o)
{
  const string g2oFile = findExampleDataFile("pose2example");
  NonlinearFactorGraph::shared_ptr actualGraph;
  Values::shared_ptr actualValues;
  boost::tie(actualGraph, actualValues) = readG2o(g2oFile);

  Values expectedValues;
  expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000));
  expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596));
  expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887));
  expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514));
  expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715));
  expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785));
  expectedValues.insert(6, Pose2(4.064626, 2.414783, 1.176333));
  expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169));
  expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391));
  expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016));
  expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934));
  EXPECT(assert_equal(expectedValues,*actualValues,1e-5));

  noiseModel::Diagonal::shared_ptr model = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699));
  NonlinearFactorGraph expectedGraph;
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model);
  EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5));
}

/* ************************************************************************* */
TEST(dataSet, readG2o3D) {
  const string g2oFile = findExampleDataFile("pose3example");
@@ -273,59 +234,103 @@ TEST( dataSet, readG2o3DNonDiagonalNoise)
}

/* ************************************************************************* */
TEST( dataSet, readG2oHuber)
{
  const string g2oFile = findExampleDataFile("pose2example");
  NonlinearFactorGraph::shared_ptr actualGraph;
  Values::shared_ptr actualValues;
  bool is3D = false;
  boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeHUBER);
TEST(dataSet, readG2oCheckDeterminants) {
  const string g2oFile = findExampleDataFile("toyExample.g2o");

  noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699));
  SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Huber::Create(1.345), baseModel);
  // Check determinants in factors
  auto factors = parse3DFactors(g2oFile);
  EXPECT_LONGS_EQUAL(6, factors.size());
  for (const auto& factor : factors) {
    const Rot3 R = factor->measured().rotation();
    EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9);
  }

  NonlinearFactorGraph expectedGraph;
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model);
  EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5));
  // Check determinants in initial values
  const map<Key, Pose3> poses = parse3DPoses(g2oFile);
  EXPECT_LONGS_EQUAL(5, poses.size());
  for (const auto& key_value : poses) {
    const Rot3 R = key_value.second.rotation();
    EXPECT_DOUBLES_EQUAL(1.0, R.matrix().determinant(), 1e-9);
  }
}

/* ************************************************************************* */
TEST( dataSet, readG2oTukey)
{
static NonlinearFactorGraph expectedGraph(const SharedNoiseModel& model) {
  NonlinearFactorGraph g;
  using Factor = BetweenFactor<Pose2>;
  g.emplace_shared<Factor>(0, 1, Pose2(1.030390, 0.011350, -0.081596), model);
  g.emplace_shared<Factor>(1, 2, Pose2(1.013900, -0.058639, -0.220291), model);
  g.emplace_shared<Factor>(2, 3, Pose2(1.027650, -0.007456, -0.043627), model);
  g.emplace_shared<Factor>(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model);
  g.emplace_shared<Factor>(4, 5, Pose2(1.016030, 0.014565, -0.030930), model);
  g.emplace_shared<Factor>(5, 6, Pose2(1.023890, 0.006808, -0.007452), model);
  g.emplace_shared<Factor>(6, 7, Pose2(0.957734, 0.003159, 0.082836), model);
  g.emplace_shared<Factor>(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model);
  g.emplace_shared<Factor>(8, 9, Pose2(1.023440, 0.013984, -0.127624), model);
  g.emplace_shared<Factor>(9, 10, Pose2(1.003350, 0.022250, -0.195918), model);
  g.emplace_shared<Factor>(5, 9, Pose2(0.033943, 0.032439, 3.073637), model);
  g.emplace_shared<Factor>(3, 10, Pose2(0.044020, 0.988477, -1.553511), model);
  return g;
}

/* ************************************************************************* */
TEST(dataSet, readG2o) {
  const string g2oFile = findExampleDataFile("pose2example");
  NonlinearFactorGraph::shared_ptr actualGraph;
  Values::shared_ptr actualValues;
  boost::tie(actualGraph, actualValues) = readG2o(g2oFile);

  auto model = noiseModel::Diagonal::Precisions(
      Vector3(44.721360, 44.721360, 30.901699));
  EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5));

  Values expectedValues;
  expectedValues.insert(0, Pose2(0.000000, 0.000000, 0.000000));
  expectedValues.insert(1, Pose2(1.030390, 0.011350, -0.081596));
  expectedValues.insert(2, Pose2(2.036137, -0.129733, -0.301887));
  expectedValues.insert(3, Pose2(3.015097, -0.442395, -0.345514));
  expectedValues.insert(4, Pose2(3.343949, 0.506678, 1.214715));
  expectedValues.insert(5, Pose2(3.684491, 1.464049, 1.183785));
  expectedValues.insert(6, Pose2(4.064626, 2.414783, 1.176333));
  expectedValues.insert(7, Pose2(4.429778, 3.300180, 1.259169));
  expectedValues.insert(8, Pose2(4.128877, 2.321481, -1.825391));
  expectedValues.insert(9, Pose2(3.884653, 1.327509, -1.953016));
  expectedValues.insert(10, Pose2(3.531067, 0.388263, -2.148934));
  EXPECT(assert_equal(expectedValues, *actualValues, 1e-5));
}

/* ************************************************************************* */
TEST(dataSet, readG2oHuber) {
  const string g2oFile = findExampleDataFile("pose2example");
  NonlinearFactorGraph::shared_ptr actualGraph;
  Values::shared_ptr actualValues;
  bool is3D = false;
  boost::tie(actualGraph, actualValues) = readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY);
  boost::tie(actualGraph, actualValues) =
      readG2o(g2oFile, is3D, KernelFunctionTypeHUBER);

  noiseModel::Diagonal::shared_ptr baseModel = noiseModel::Diagonal::Precisions(Vector3(44.721360, 44.721360, 30.901699));
  SharedNoiseModel model = noiseModel::Robust::Create(noiseModel::mEstimator::Tukey::Create(4.6851), baseModel);
  auto baseModel = noiseModel::Diagonal::Precisions(
      Vector3(44.721360, 44.721360, 30.901699));
  auto model = noiseModel::Robust::Create(
      noiseModel::mEstimator::Huber::Create(1.345), baseModel);

  NonlinearFactorGraph expectedGraph;
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(0, 1, Pose2(1.030390, 0.011350, -0.081596), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(1, 2, Pose2(1.013900, -0.058639, -0.220291), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(2, 3, Pose2(1.027650, -0.007456, -0.043627), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3, 4, Pose2(-0.012016, 1.004360, 1.560229), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(4, 5, Pose2(1.016030, 0.014565, -0.030930), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 6, Pose2(1.023890, 0.006808, -0.007452), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(6, 7, Pose2(0.957734, 0.003159, 0.082836), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(7, 8, Pose2(-1.023820, -0.013668, -3.084560), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(8, 9, Pose2(1.023440, 0.013984, -0.127624), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(9,10, Pose2(1.003350, 0.022250, -0.195918), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(5, 9, Pose2(0.033943, 0.032439, 3.073637), model);
  expectedGraph.emplace_shared<BetweenFactor<Pose2> >(3,10, Pose2(0.044020, 0.988477, -1.553511), model);
  EXPECT(assert_equal(expectedGraph,*actualGraph,1e-5));
  EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5));
}

/* ************************************************************************* */
TEST(dataSet, readG2oTukey) {
  const string g2oFile = findExampleDataFile("pose2example");
  NonlinearFactorGraph::shared_ptr actualGraph;
  Values::shared_ptr actualValues;
  bool is3D = false;
  boost::tie(actualGraph, actualValues) =
      readG2o(g2oFile, is3D, KernelFunctionTypeTUKEY);

  auto baseModel = noiseModel::Diagonal::Precisions(
      Vector3(44.721360, 44.721360, 30.901699));
  auto model = noiseModel::Robust::Create(
      noiseModel::mEstimator::Tukey::Create(4.6851), baseModel);

  EXPECT(assert_equal(expectedGraph(model), *actualGraph, 1e-5));
}

/* ************************************************************************* */
@@ -495,7 +500,7 @@ TEST( dataSet, writeBALfromValues_Dubrovnik){
  SfmData readData;
  readBAL(filenameToRead, readData);

  Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), gtsam::Point3(0.3,0.1,0.3));
  Pose3 poseChange = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10), Point3(0.3,0.1,0.3));

  Values value;
  for(size_t i=0; i < readData.number_cameras(); i++){ // for each camera
@@ -0,0 +1,244 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2019, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * testFrobeniusFactor.cpp
 *
 * @file testFrobeniusFactor.cpp
 * @date March 2019
 * @author Frank Dellaert
 * @brief Check evaluateError for various Frobenius norm factors
 */

#include <gtsam/base/lieProxies.h>
#include <gtsam/base/testLie.h>
#include <gtsam/geometry/Rot3.h>
#include <gtsam/geometry/SO3.h>
#include <gtsam/geometry/SO4.h>
#include <gtsam/nonlinear/GaussNewtonOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/slam/FrobeniusFactor.h>

#include <CppUnitLite/TestHarness.h>

using namespace std;
using namespace gtsam;

//******************************************************************************
namespace so3 {
SO3 id;
Vector3 v1 = (Vector(3) << 0.1, 0, 0).finished();
SO3 R1 = SO3::Expmap(v1);
Vector3 v2 = (Vector(3) << 0.01, 0.02, 0.03).finished();
SO3 R2 = SO3::Expmap(v2);
SO3 R12 = R1.between(R2);
}  // namespace so3

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusPrior<SO3>(1, R2.matrix());
  Vector actual = factor.evaluateError(R1);
  Vector expected = R1.vec() - R2.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, ClosestTo) {
  // Example top-left of SO(4) matrix not quite on SO(3) manifold
  Matrix3 M;
  M << 0.79067393, 0.6051136, -0.0930814,   //
      0.4155925, -0.64214347, -0.64324489,  //
      -0.44948549, 0.47046326, -0.75917576;

  SO3 expected = SO3::ClosestTo(M);

  // manifold optimization gets same result as SVD solution in ClosestTo
  NonlinearFactorGraph graph;
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, M);

  Values initial;
  initial.insert(1, SO3(I_3x3));
  auto result = GaussNewtonOptimizer(graph, initial).optimize();
  EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 1e-6);
  EXPECT(assert_equal(expected, result.at<SO3>(1), 1e-6));
}

/* ************************************************************************* */
TEST(FrobeniusPriorSO3, ChordalL2mean) {
  // See Hartley13ijcv:
  // Cost function C(R) = \sum FrobeniusPrior(R,R_i)
  // Closed form solution = ClosestTo(C_e), where
  // C_e = \sum R_i !!!!

  // We will test by computing mean of R1=exp(v1) R1^T=exp(-v1):
  using namespace ::so3;
  SO3 expected;  // identity
  Matrix3 M = R1.matrix() + R1.matrix().transpose();
  EXPECT(assert_equal(expected, SO3::ClosestTo(M), 1e-6));
  EXPECT(assert_equal(expected, SO3::ChordalMean({R1, R1.inverse()}), 1e-6));

  // manifold optimization gets same result as ChordalMean
  NonlinearFactorGraph graph;
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, R1.matrix());
  graph.emplace_shared<FrobeniusPrior<SO3> >(1, R1.matrix().transpose());

  Values initial;
  initial.insert<SO3>(1, R1.inverse());
  auto result = GaussNewtonOptimizer(graph, initial).optimize();
  EXPECT_DOUBLES_EQUAL(0.0, graph.error(result), 0.1);  // Why so loose?
  EXPECT(assert_equal(expected, result.at<SO3>(1), 1e-5));
}

/* ************************************************************************* */
TEST(FrobeniusFactorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusFactor<SO3>(1, 2);
  Vector actual = factor.evaluateError(R1, R2);
  Vector expected = R2.vec() - R1.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  values.insert(2, R2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
// Commented out as SO(n) not yet supported (and might never be)
// TEST(FrobeniusBetweenFactorSOn, evaluateError) {
//   using namespace ::so3;
//   auto factor =
//       FrobeniusBetweenFactor<SOn>(1, 2, SOn::FromMatrix(R12.matrix()));
//   Vector actual = factor.evaluateError(SOn::FromMatrix(R1.matrix()),
//                                        SOn::FromMatrix(R2.matrix()));
//   Vector expected = Vector9::Zero();
//   EXPECT(assert_equal(expected, actual, 1e-9));

//   Values values;
//   values.insert(1, R1);
//   values.insert(2, R2);
//   EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
// }

/* ************************************************************************* */
TEST(FrobeniusBetweenFactorSO3, evaluateError) {
  using namespace ::so3;
  auto factor = FrobeniusBetweenFactor<SO3>(1, 2, R12);
  Vector actual = factor.evaluateError(R1, R2);
  Vector expected = Vector9::Zero();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, R1);
  values.insert(2, R2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

//******************************************************************************
namespace so4 {
SO4 id;
Vector6 v1 = (Vector(6) << 0.1, 0, 0, 0, 0, 0).finished();
SO4 Q1 = SO4::Expmap(v1);
Vector6 v2 = (Vector(6) << 0.01, 0.02, 0.03, 0.04, 0.05, 0.06).finished();
SO4 Q2 = SO4::Expmap(v2);
}  // namespace so4

/* ************************************************************************* */
TEST(FrobeniusFactorSO4, evaluateError) {
  using namespace ::so4;
  auto factor = FrobeniusFactor<SO4>(1, 2, noiseModel::Unit::Create(6));
  Vector actual = factor.evaluateError(Q1, Q2);
  Vector expected = Q2.vec() - Q1.vec();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, Q1);
  values.insert(2, Q2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

/* ************************************************************************* */
TEST(FrobeniusBetweenFactorSO4, evaluateError) {
  using namespace ::so4;
  Matrix4 M{I_4x4};
  M.topLeftCorner<3, 3>() = ::so3::R12.matrix();
  auto factor = FrobeniusBetweenFactor<SO4>(1, 2, Q1.between(Q2));
  Matrix H1, H2;
  Vector actual = factor.evaluateError(Q1, Q2, H1, H2);
  Vector expected = SO4::VectorN2::Zero();
  EXPECT(assert_equal(expected, actual, 1e-9));

  Values values;
  values.insert(1, Q1);
  values.insert(2, Q2);
  EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
}

//******************************************************************************
namespace submanifold {
SO4 id;
Vector6 v1 = (Vector(6) << 0, 0, 0, 0.1, 0, 0).finished();
SO3 R1 = SO3::Expmap(v1.tail<3>());
SO4 Q1 = SO4::Expmap(v1);
Vector6 v2 = (Vector(6) << 0, 0, 0, 0.01, 0.02, 0.03).finished();
SO3 R2 = SO3::Expmap(v2.tail<3>());
SO4 Q2 = SO4::Expmap(v2);
SO3 R12 = R1.between(R2);
}  // namespace submanifold

/* ************************************************************************* */
TEST(FrobeniusWormholeFactor, evaluateError) {
  auto model = noiseModel::Isotropic::Sigma(6, 1.2);  // dimension = 6 not 16
  for (const size_t p : {5, 4, 3}) {
    Matrix M = Matrix::Identity(p, p);
    M.topLeftCorner(3, 3) = submanifold::R1.matrix();
    SOn Q1(M);
    M.topLeftCorner(3, 3) = submanifold::R2.matrix();
    SOn Q2(M);
    auto factor =
        FrobeniusWormholeFactor(1, 2, Rot3(::so3::R12.matrix()), p, model);
    Matrix H1, H2;
    factor.evaluateError(Q1, Q2, H1, H2);

    // Test derivatives
    Values values;
    values.insert(1, Q1);
    values.insert(2, Q2);
    EXPECT_CORRECT_FACTOR_JACOBIANS(factor, values, 1e-7, 1e-5);
  }
}

/* ************************************************************************* */
TEST(FrobeniusWormholeFactor, equivalenceToSO3) {
  using namespace ::submanifold;
  auto R12 = ::so3::R12.retract(Vector3(0.1, 0.2, -0.1));
  auto model = noiseModel::Isotropic::Sigma(6, 1.2);  // wrong dimension
  auto factor3 = FrobeniusBetweenFactor<SO3>(1, 2, R12, model);
  auto factor4 = FrobeniusWormholeFactor(1, 2, Rot3(R12.matrix()), 4, model);
  const Matrix3 E3(factor3.evaluateError(R1, R2).data());
  const Matrix43 E4(
      factor4.evaluateError(SOn(Q1.matrix()), SOn(Q2.matrix())).data());
  EXPECT(assert_equal((Matrix)E4.topLeftCorner<3, 3>(), E3, 1e-9));
  EXPECT(assert_equal((Matrix)E4.row(3), Matrix13::Zero(), 1e-9));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -37,11 +37,11 @@ class PinholeFactor: public SmartFactorBase<PinholeCamera<Cal3Bundler> > {
 public:
  typedef SmartFactorBase<PinholeCamera<Cal3Bundler> > Base;
  PinholeFactor() {}
  PinholeFactor(const SharedNoiseModel& sharedNoiseModel): Base(sharedNoiseModel) {
  }
  virtual double error(const Values& values) const {
    return 0.0;
  }
  PinholeFactor(const SharedNoiseModel& sharedNoiseModel,
                boost::optional<Pose3> body_P_sensor = boost::none,
                size_t expectedNumberCameras = 10)
      : Base(sharedNoiseModel, body_P_sensor, expectedNumberCameras) {}
  virtual double error(const Values& values) const { return 0.0; }
  virtual boost::shared_ptr<GaussianFactor> linearize(
      const Values& values) const {
    return boost::shared_ptr<GaussianFactor>(new JacobianFactor());
@@ -60,6 +60,40 @@ TEST(SmartFactorBase, Pinhole) {
  EXPECT_LONGS_EQUAL(2 * 2, f.dim());
}

TEST(SmartFactorBase, PinholeWithSensor) {
  Pose3 body_P_sensor(Rot3(), Point3(1, 0, 0));
  PinholeFactor f = PinholeFactor(unit2, body_P_sensor);
  EXPECT(assert_equal<Pose3>(f.body_P_sensor(), body_P_sensor));

  PinholeFactor::Cameras cameras;
  // Assume body at origin.
  Pose3 world_P_body = Pose3();
  // Camera coordinates in world frame.
  Pose3 wTc = world_P_body * body_P_sensor;
  cameras.push_back(PinholeCamera<Cal3Bundler>(wTc));

  // Simple point to project slightly off image center
  Point3 p(0, 0, 10);
  Point2 measurement = cameras[0].project(p);
  f.add(measurement, 1);

  PinholeFactor::Cameras::FBlocks Fs;
  Matrix E;
  Vector error = f.unwhitenedError<Point3>(cameras, p, Fs, E);

  Vector expectedError = Vector::Zero(2);
  Matrix29 expectedFs;
  expectedFs << -0.001, -1.00001, 0, -0.1, 0, -0.01, 0, 0, 0, 1, 0, 0, 0, -0.1, 0, 0, 0, 0;
  Matrix23 expectedE;
  expectedE << 0.1, 0, 0.01, 0, 0.1, 0;

  EXPECT(assert_equal(error, expectedError));
  // We only have the Jacobian for the single camera.
  // Use a looser tolerance here because of compiler-dependent precision.
  EXPECT(assert_equal(expectedFs, Fs[0], 1e-3));
  EXPECT(assert_equal(expectedE, E));
}

/* ************************************************************************* */
#include <gtsam/geometry/StereoCamera.h>
@@ -9,5 +9,6 @@ set (GTSAM_USE_TBB @GTSAM_USE_TBB@)
set (GTSAM_DEFAULT_ALLOCATOR @GTSAM_DEFAULT_ALLOCATOR@)

if("@GTSAM_INSTALL_CYTHON_TOOLBOX@")
  list(APPEND GTSAM_CYTHON_INSTALL_PATH "@GTSAM_CYTHON_INSTALL_PATH@")
  list(APPEND GTSAM_EIGENCY_INSTALL_PATH "@GTSAM_EIGENCY_INSTALL_PATH@")
endif()
@@ -0,0 +1,90 @@
/**
 * @file PoseToPointFactor.hpp
 * @brief This factor can be used to track a 3D landmark over time by
 * providing local measurements of its location.
 * @author David Wisth
 **/
#pragma once

#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/NonlinearFactor.h>
#include <ostream>

namespace gtsam {

/**
 * A class for a measurement between a pose and a point.
 * @addtogroup SLAM
 */
class PoseToPointFactor : public NoiseModelFactor2<Pose3, Point3> {
 private:
  typedef PoseToPointFactor This;
  typedef NoiseModelFactor2<Pose3, Point3> Base;

  Point3 measured_; /** the point measurement in local coordinates */

 public:
  // shorthand for a smart pointer to a factor
  typedef boost::shared_ptr<PoseToPointFactor> shared_ptr;

  /** default constructor - only use for serialization */
  PoseToPointFactor() {}

  /** Constructor */
  PoseToPointFactor(Key key1, Key key2, const Point3& measured,
                    const SharedNoiseModel& model)
      : Base(model, key1, key2), measured_(measured) {}

  virtual ~PoseToPointFactor() {}

  /** implement functions needed for Testable */

  /** print */
  virtual void print(const std::string& s, const KeyFormatter& keyFormatter =
                                               DefaultKeyFormatter) const {
    std::cout << s << "PoseToPointFactor(" << keyFormatter(this->key1()) << ","
              << keyFormatter(this->key2()) << ")\n"
              << "  measured: " << measured_.transpose() << std::endl;
    this->noiseModel_->print("  noise model: ");
  }

  /** equals */
  virtual bool equals(const NonlinearFactor& expected,
                      double tol = 1e-9) const {
    const This* e = dynamic_cast<const This*>(&expected);
    return e != nullptr && Base::equals(*e, tol) &&
           traits<Point3>::Equals(this->measured_, e->measured_, tol);
  }

  /** implement functions needed to derive from Factor */

  /** vector of errors
   * @brief Error = wTwi.inverse()*wPwp - measured_
   * @param wTwi The pose of the sensor in world coordinates
   * @param wPwp The estimated point location in world coordinates
   *
   * Note: measured_ and the error are in local coordinates.
   */
  Vector evaluateError(const Pose3& wTwi, const Point3& wPwp,
                       boost::optional<Matrix&> H1 = boost::none,
                       boost::optional<Matrix&> H2 = boost::none) const {
    return wTwi.transformTo(wPwp, H1, H2) - measured_;
  }

  /** return the measured */
  const Point3& measured() const { return measured_; }

 private:
  /** Serialization function */
  friend class boost::serialization::access;
  template <class ARCHIVE>
  void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
    ar& boost::serialization::make_nvp(
        "NoiseModelFactor2", boost::serialization::base_object<Base>(*this));
    ar& BOOST_SERIALIZATION_NVP(measured_);
  }

};  // \class PoseToPointFactor

}  // namespace gtsam
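A minimal usage sketch for the new factor above. The keys, sigma, and measurement are illustrative assumptions only, mirroring the unit tests that follow.

    // Sketch: constrain a landmark (Point3) against a Pose3 with a local measurement.
    #include <gtsam/linear/NoiseModel.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam_unstable/slam/PoseToPointFactor.h>

    using namespace gtsam;

    void addPointObservation(NonlinearFactorGraph& graph) {
      Key poseKey = 1, pointKey = 2;                 // hypothetical keys
      Point3 localMeasurement(1.0, 2.0, 3.0);        // point expressed in the sensor frame
      auto noise = noiseModel::Isotropic::Sigma(3, 0.05);
      graph.emplace_shared<PoseToPointFactor>(poseKey, pointKey, localMeasurement, noise);
    }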
@@ -0,0 +1,86 @@
/**
 * @file testPoseToPointFactor.cpp
 * @brief Unit tests for PoseToPointFactor
 * @author David Wisth
 * @date June 20, 2020
 */

#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam_unstable/slam/PoseToPointFactor.h>

using namespace gtsam;
using namespace gtsam::noiseModel;

/// Verify zero error when there is no noise
TEST(PoseToPointFactor, errorNoiseless) {
  Pose3 pose = Pose3::identity();
  Point3 point(1.0, 2.0, 3.0);
  Point3 noise(0.0, 0.0, 0.0);
  Point3 measured = point + noise;

  Key pose_key(1);
  Key point_key(2);
  PoseToPointFactor factor(pose_key, point_key, measured,
                           Isotropic::Sigma(3, 0.05));
  Vector expectedError = Vector3(0.0, 0.0, 0.0);
  Vector actualError = factor.evaluateError(pose, point);
  EXPECT(assert_equal(expectedError, actualError, 1E-5));
}

/// Verify expected error in test scenario
TEST(PoseToPointFactor, errorNoise) {
  Pose3 pose = Pose3::identity();
  Point3 point(1.0, 2.0, 3.0);
  Point3 noise(-1.0, 0.5, 0.3);
  Point3 measured = point + noise;

  Key pose_key(1);
  Key point_key(2);
  PoseToPointFactor factor(pose_key, point_key, measured,
                           Isotropic::Sigma(3, 0.05));
  Vector expectedError = noise;
  Vector actualError = factor.evaluateError(pose, point);
  EXPECT(assert_equal(expectedError, actualError, 1E-5));
}

/// Check Jacobians are correct
TEST(PoseToPointFactor, jacobian) {
  // Measurement
  gtsam::Point3 l_meas = gtsam::Point3(1, 2, 3);

  // Linearisation point
  gtsam::Point3 p_t = gtsam::Point3(-5, 12, 2);
  gtsam::Rot3 p_R = gtsam::Rot3::RzRyRx(1.5 * M_PI, -0.3 * M_PI, 0.4 * M_PI);
  Pose3 p(p_R, p_t);

  gtsam::Point3 l = gtsam::Point3(3, 0, 5);

  // Factor
  Key pose_key(1);
  Key point_key(2);
  SharedGaussian noise = noiseModel::Diagonal::Sigmas(Vector3(0.1, 0.1, 0.1));
  PoseToPointFactor factor(pose_key, point_key, l_meas, noise);

  // Calculate numerical derivatives
  auto f = boost::bind(&PoseToPointFactor::evaluateError, factor, _1, _2,
                       boost::none, boost::none);
  Matrix numerical_H1 = numericalDerivative21<Vector, Pose3, Point3>(f, p, l);
  Matrix numerical_H2 = numericalDerivative22<Vector, Pose3, Point3>(f, p, l);

  // Use the factor to calculate the derivative
  Matrix actual_H1;
  Matrix actual_H2;
  factor.evaluateError(p, l, actual_H1, actual_H2);

  // Verify the analytic Jacobians match the numerical ones
  EXPECT_TRUE(assert_equal(numerical_H1, actual_H1, 1e-8));
  EXPECT_TRUE(assert_equal(numerical_H2, actual_H2, 1e-8));
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -0,0 +1,87 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010-2020, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file testTranslationRecovery.cpp
 * @author Frank Dellaert
 * @date March 2020
 * @brief test recovering translations when rotations are given.
 */

#include <gtsam/sfm/TranslationRecovery.h>

#include <CppUnitLite/TestHarness.h>
#include <gtsam/slam/dataset.h>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */
// We read the BAL file, which has 3 cameras in it, with poses. We then assume
// the rotations are correct, but translations have to be estimated from
// translation directions only. Since we have 3 cameras, A, B, and C, we can at
// most create three relative measurements, let's call them w_aZb, w_aZc, and
// bZc. These will be of type Unit3. We then call `recoverTranslations` which
// sets up an optimization problem for the three unknown translations.
TEST(TranslationRecovery, BAL) {
  const string filename = findExampleDataFile("dubrovnik-3-7-pre");
  SfmData db;
  bool success = readBAL(filename, db);
  if (!success) throw runtime_error("Could not access file!");

  // Get camera poses, as Values
  size_t j = 0;
  Values poses;
  for (auto camera : db.cameras) {
    poses.insert(j++, camera.pose());
  }

  // Simulate measurements
  const auto relativeTranslations = TranslationRecovery::SimulateMeasurements(
      poses, {{0, 1}, {0, 2}, {1, 2}});

  // Check
  const Pose3 wTa = poses.at<Pose3>(0), wTb = poses.at<Pose3>(1),
              wTc = poses.at<Pose3>(2);
  const Point3 Ta = wTa.translation(), Tb = wTb.translation(),
               Tc = wTc.translation();
  const Rot3 aRw = wTa.rotation().inverse();
  const Unit3 w_aZb = relativeTranslations.at({0, 1});
  EXPECT(assert_equal(Unit3(Tb - Ta), w_aZb));
  const Unit3 w_aZc = relativeTranslations.at({0, 2});
  EXPECT(assert_equal(Unit3(Tc - Ta), w_aZc));

  TranslationRecovery algorithm(relativeTranslations);
  const auto graph = algorithm.buildGraph();
  EXPECT_LONGS_EQUAL(3, graph.size());

  // Translation recovery, version 1
  const double scale = 2.0;
  const auto result = algorithm.run(scale);

  // Check result for first two translations, determined by prior
  EXPECT(assert_equal(Point3(0, 0, 0), result.at<Point3>(0)));
  EXPECT(assert_equal(Point3(2 * w_aZb.point3()), result.at<Point3>(1)));

  // Check that the third translation is correct
  Point3 expected = (Tc - Ta) * (scale / (Tb - Ta).norm());
  EXPECT(assert_equal(expected, result.at<Point3>(2), 1e-4));

  // TODO(frank): how to get stats back?
  // EXPECT_DOUBLES_EQUAL(0.0199833, actualError, 1e-5);
}

/* ************************************************************************* */
int main() {
  TestResult tr;
  return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */
@@ -0,0 +1,110 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file timeFrobeniusFactor.cpp
 * @brief time FrobeniusFactor with BAL file
 * @author Frank Dellaert
 * @date June 6, 2015
 */

#include <gtsam/base/timing.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/SO4.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/PCGSolver.h>
#include <gtsam/linear/SubgraphPreconditioner.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearEquality.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/FrobeniusFactor.h>

#include <iostream>
#include <string>
#include <vector>

using namespace std;
using namespace gtsam;

static SharedNoiseModel gNoiseModel = noiseModel::Unit::Create(2);

int main(int argc, char* argv[]) {
  // primitive argument parsing:
  if (argc > 3) {
    throw runtime_error("Usage: timeFrobeniusFactor [g2oFile]");
  }

  string g2oFile;
  try {
    if (argc > 1)
      g2oFile = argv[argc - 1];
    else
      g2oFile =
          "/Users/dellaert/git/2019c-notes-shonanrotationaveraging/matlabCode/"
          "datasets/randomTorus3D.g2o";
    // g2oFile = findExampleDataFile("sphere_smallnoise.graph");
  } catch (const exception& e) {
    cerr << e.what() << '\n';
    exit(1);
  }

  // Read G2O file
  const auto factors = parse3DFactors(g2oFile);
  const auto poses = parse3DPoses(g2oFile);

  // Build graph
  NonlinearFactorGraph graph;
  // graph.add(NonlinearEquality<SO4>(0, SO4()));
  auto priorModel = noiseModel::Isotropic::Sigma(6, 10000);
  graph.add(PriorFactor<SO4>(0, SO4(), priorModel));
  for (const auto& factor : factors) {
    const auto& keys = factor->keys();
    const auto& Tij = factor->measured();
    const auto& model = factor->noiseModel();
    graph.emplace_shared<FrobeniusWormholeFactor>(
        keys[0], keys[1], SO3(Tij.rotation().matrix()), model);
  }

  boost::mt19937 rng(42);

  // Set parameters to be similar to ceres
  LevenbergMarquardtParams params;
  LevenbergMarquardtParams::SetCeresDefaults(&params);
  params.setLinearSolverType("MULTIFRONTAL_QR");
  // params.setVerbosityLM("SUMMARY");
  // params.linearSolverType = LevenbergMarquardtParams::Iterative;
  // auto pcg = boost::make_shared<PCGSolverParameters>();
  // pcg->preconditioner_ =
  //     boost::make_shared<SubgraphPreconditionerParameters>();
  //     boost::make_shared<BlockJacobiPreconditionerParameters>();
  // params.iterativeParams = pcg;

  // Optimize
  for (size_t i = 0; i < 100; i++) {
    gttic_(optimize);
    Values initial;
    initial.insert(0, SO4());
    for (size_t j = 1; j < poses.size(); j++) {
      initial.insert(j, SO4::Random(rng));
    }
    LevenbergMarquardtOptimizer lm(graph, initial, params);
    Values result = lm.optimize();
    cout << "cost = " << graph.error(result) << endl;
  }

  tictoc_finishedIteration_();
  tictoc_print_();

  return 0;
}
@@ -350,7 +350,10 @@ void Module::emit_cython_pxd(FileWriter& pxdFile) const {
      " T* get()\n"
      " long use_count() const\n"
      " T& operator*()\n\n"
      " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n"
      " cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)\n\n";

  // gtsam alignment-friendly shared_ptr
  pxdFile.oss << "cdef extern from \"gtsam/base/make_shared.h\" namespace \"gtsam\":\n"
      " cdef shared_ptr[T] make_shared[T](const T& r)\n\n";

  for(const TypedefPair& types: typedefs)
@@ -16,6 +16,8 @@ cdef extern from "boost/shared_ptr.hpp" namespace "boost":
        T& operator*()

    cdef shared_ptr[T] dynamic_pointer_cast[T,U](const shared_ptr[U]& r)

cdef extern from "gtsam/base/make_shared.h" namespace "gtsam":
    cdef shared_ptr[T] make_shared[T](const T& r)

cdef extern from "gtsam/geometry/Point2.h" namespace "gtsam":