Merge branch 'develop' into feature/discrete_wrapper
commit 7891154d8e

@@ -3,6 +3,7 @@
.idea
*.pyc
*.DS_Store
*.swp
/examples/Data/dubrovnik-3-7-pre-rewritten.txt
/examples/Data/pose2example-rewritten.txt
/examples/Data/pose3example-rewritten.txt

@@ -105,6 +105,11 @@ endif()
GtsamMakeConfigFile(GTSAM "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
export(TARGETS ${GTSAM_EXPORTED_TARGETS} FILE GTSAM-exports.cmake)

if (GTSAM_BUILD_UNSTABLE)
    GtsamMakeConfigFile(GTSAM_UNSTABLE "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_extra.cmake.in")
    export(TARGETS ${GTSAM_UNSTABLE_EXPORTED_TARGETS} FILE GTSAM_UNSTABLE-exports.cmake)
endif()

# Check for doxygen availability - optional dependency
find_package(Doxygen)

@@ -27,6 +27,8 @@ function(GtsamMakeConfigFile PACKAGE_NAME)
  # here.
  if(NOT DEFINED ${PACKAGE_NAME}_VERSION AND DEFINED ${PACKAGE_NAME}_VERSION_STRING)
    set(${PACKAGE_NAME}_VERSION ${${PACKAGE_NAME}_VERSION_STRING})
  elseif(NOT DEFINED ${PACKAGE_NAME}_VERSION_STRING)
    set(${PACKAGE_NAME}_VERSION ${GTSAM_VERSION_STRING})
  endif()

  # Version file

@@ -14,20 +14,21 @@ if(GTSAM_UNSTABLE_AVAILABLE)
option(GTSAM_UNSTABLE_BUILD_PYTHON "Enable/Disable Python wrapper for libgtsam_unstable" ON)
option(GTSAM_UNSTABLE_INSTALL_MATLAB_TOOLBOX "Enable/Disable MATLAB wrapper for libgtsam_unstable" OFF)
endif()
option(BUILD_SHARED_LIBS "Build shared gtsam library, instead of static" ON)
option(GTSAM_USE_QUATERNIONS "Enable/Disable using an internal Quaternion representation for rotations instead of rotation matrices. If enable, Rot3::EXPMAP is enforced by default." OFF)
option(GTSAM_POSE3_EXPMAP "Enable/Disable using Pose3::EXPMAP as the default mode. If disabled, Pose3::FIRST_ORDER will be used." ON)
option(GTSAM_ROT3_EXPMAP "Ignore if GTSAM_USE_QUATERNIONS is OFF (Rot3::EXPMAP by default). Otherwise, enable Rot3::EXPMAP, or if disabled, use Rot3::CAYLEY." ON)
option(GTSAM_ENABLE_CONSISTENCY_CHECKS "Enable/Disable expensive consistency checks" OFF)
option(GTSAM_WITH_TBB "Use Intel Threaded Building Blocks (TBB) if available" ON)
option(GTSAM_WITH_EIGEN_MKL "Eigen will use Intel MKL if available" OFF)
option(GTSAM_WITH_EIGEN_MKL_OPENMP "Eigen, when using Intel MKL, will also use OpenMP for multithreading if available" OFF)
option(GTSAM_THROW_CHEIRALITY_EXCEPTION "Throw exception when a triangulated point is behind a camera" ON)
option(GTSAM_BUILD_PYTHON "Enable/Disable building & installation of Python module with pybind11" OFF)
option(GTSAM_INSTALL_MATLAB_TOOLBOX "Enable/Disable installation of matlab toolbox" OFF)
option(GTSAM_ALLOW_DEPRECATED_SINCE_V41 "Allow use of methods/functions deprecated in GTSAM 4.1" ON)
option(GTSAM_SUPPORT_NESTED_DISSECTION "Support Metis-based nested dissection" ON)
option(GTSAM_TANGENT_PREINTEGRATION "Use new ImuFactor with integration on tangent space" ON)
option(GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR "Use the slower but correct version of BetweenFactor" OFF)
if(NOT MSVC AND NOT XCODE_VERSION)
option(GTSAM_BUILD_WITH_CCACHE "Use ccache compiler cache" ON)
endif()

@@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB)
  if(TBB_FOUND)
    set(GTSAM_USE_TBB 1)  # This will go into config.h

    if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
       message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
    endif()
    # if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
    #    message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
    # endif()

    if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
      set(TBB_GREATER_EQUAL_2020 1)

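The hard CMake rejection of oneTBB is being commented out above; the same guard can be mirrored at compile time. A minimal sketch only, assuming the classic-TBB version macros from tbb/tbb_stddef.h, which are not part of this diff:

    // Sketch: reject oneTBB (>= 2021.1) at compile time, mirroring the CMake
    // check above. TBB_VERSION_MAJOR comes from classic TBB's tbb_stddef.h and
    // is an assumption here, not something this change introduces.
    #include <tbb/tbb_stddef.h>

    #if TBB_VERSION_MAJOR >= 2021
    #error "oneTBB (2021.1 and newer) is not yet supported; use classic TBB 2020 or older."
    #endif
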
388 doc/math.lyx
@@ -5082,6 +5082,394 @@ reference "ex:projection"

\subsection{Derivative of Adjoint}
\label{subsec:pose3_adjoint_deriv}

Consider $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$ defined as $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$.
The derivative is denoted (see Section \ref{sec:Derivatives-of-Actions}):
\[
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
\]
First, computing $D_{2}f_{(T,\xi_{b})}(\xi_{b})$ is easy, as its matrix is simply $Ad_{T}$:
\[
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
\]
\[
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
\]
We will derive $D_{1}f_{(T,\xi_{b})}(\xi)$ using two approaches.
In the first, we define $g(T,\xi)\triangleq T\exp\hat{\xi}$.
From Section \ref{sec:Derivatives-of-Actions},
\begin{align*}
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
\end{align*}
Now we can use the definition of the Adjoint representation $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$ (i.e.
conjugation by $g$), then apply the product rule and simplify:
\begin{align*}
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
 & =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
 & =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
 & =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
 & =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
 & =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
D_{1}f_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
\end{align*}
where $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$ is the adjoint map of the Lie algebra.

The second, perhaps more intuitive way of deriving $D_{1}f_{(T,\xi_{b})}(\xi_{b})$ is to use the fact
that the derivative at the origin $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$ by definition of the
adjoint $ad_{\xi}$.
Then, applying the property $Ad_{AB}=Ad_{A}Ad_{B}$,
\[
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T\cdot I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
\]

\subsection{Derivative of AdjointTranspose}

The transpose of the Adjoint, $Ad_{T}^{T}:\mathfrak{g}^{*}\rightarrow\mathfrak{g}^{*}$, is useful as a
way to change the reference frame of vectors in the dual space (note the $^{*}$ denoting that we are
now in the dual space).
To be more concrete: where $Ad_{T}\hat{\xi}_{b}$ converts the \emph{twist} $\xi_{b}$ from the $T$
frame, $Ad_{T}^{T}\hat{\xi}_{b}^{*}$ converts the \emph{wrench} $\xi_{b}^{*}$ from the $T$ frame.
It is difficult to apply a derivation similar to Section \ref{subsec:pose3_adjoint_deriv} for the
derivative of $Ad_{T}^{T}\hat{\xi}_{b}^{*}$, because $Ad_{T}^{T}$ cannot be naturally defined as a
conjugation, so we resort to crunching through the algebra.
The details are omitted, but the result is a form that vaguely resembles (but does not exactly match)
$ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$:
\begin{align*}
\begin{bmatrix}\omega_{T}\\
v_{T}
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
\hat{v}_{T} & 0
\end{bmatrix}
\end{align*}

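The closed form derived above, D1 f = -Ad_T ad(xi_b), is easy to sanity-check numerically. A minimal sketch, under the assumption that GTSAM's Pose3 exposes AdjointMap(), the static adjointMap(xi), and Expmap with their usual signatures; the test values are arbitrary and this program is illustrative, not part of the documentation change:

    #include <gtsam/geometry/Pose3.h>
    #include <iostream>

    using namespace gtsam;

    int main() {
      // Arbitrary pose T and body twist xi_b (assumed test values).
      Pose3 T = Pose3::Expmap((Vector6() << 0.1, -0.2, 0.3, 1.0, 2.0, 3.0).finished());
      Vector6 xi_b = (Vector6() << 0.2, 0.1, -0.3, 0.5, -1.0, 0.7).finished();

      // Analytic derivative from the derivation above: D1 f = -Ad_T * ad(xi_b).
      Matrix6 analytic = -T.AdjointMap() * Pose3::adjointMap(xi_b);

      // Central differences of f(T exp(xi), xi_b) = Ad_{T exp(xi)} xi_b at xi = 0.
      Matrix6 numeric;
      const double h = 1e-5;
      for (int k = 0; k < 6; k++) {
        Vector6 delta = Vector6::Zero();
        delta(k) = h;
        Vector6 plus = (T * Pose3::Expmap(delta)).AdjointMap() * xi_b;
        Vector6 minus = (T * Pose3::Expmap(-delta)).AdjointMap() * xi_b;
        numeric.col(k) = (plus - minus) / (2.0 * h);
      }
      std::cout << "max error: " << (analytic - numeric).cwiseAbs().maxCoeff() << std::endl;
      return 0;
    }
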
BIN doc/math.pdf
Binary file not shown.

@@ -56,8 +56,8 @@ int main(int argc, char **argv) {
  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);

  // solve
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  GTSAM_PRINT(*mpe);
  auto mpe = chordal->optimize();
  GTSAM_PRINT(mpe);

  // We can also build a Bayes tree (directed junction tree).
  // The elimination order above will do fine:

@@ -70,14 +70,14 @@ int main(int argc, char **argv) {

  // solve again, now with evidence
  DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
  DiscreteFactor::sharedValues mpe2 = chordal2->optimize();
  GTSAM_PRINT(*mpe2);
  auto mpe2 = chordal2->optimize();
  GTSAM_PRINT(mpe2);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
    DiscreteFactor::sharedValues sample = chordal2->sample();
    GTSAM_PRINT(*sample);
    auto sample = chordal2->sample();
    GTSAM_PRINT(sample);
  }
  return 0;
}

@@ -33,11 +33,11 @@ using namespace gtsam;
int main(int argc, char **argv) {
  // Define keys and a print function
  Key C(1), S(2), R(3), W(4);
  auto print = [=](DiscreteFactor::sharedValues values) {
    cout << boolalpha << "Cloudy = " << static_cast<bool>((*values)[C])
         << " Sprinkler = " << static_cast<bool>((*values)[S])
         << " Rain = " << boolalpha << static_cast<bool>((*values)[R])
         << " WetGrass = " << static_cast<bool>((*values)[W]) << endl;
  auto print = [=](const DiscreteFactor::Values& values) {
    cout << boolalpha << "Cloudy = " << static_cast<bool>(values.at(C))
         << " Sprinkler = " << static_cast<bool>(values.at(S))
         << " Rain = " << boolalpha << static_cast<bool>(values.at(R))
         << " WetGrass = " << static_cast<bool>(values.at(W)) << endl;
  };

  // We assume binary state variables

@@ -85,7 +85,7 @@ int main(int argc, char **argv) {
  }

  // "Most Probable Explanation", i.e., configuration with largest value
  DiscreteFactor::sharedValues mpe = graph.eliminateSequential()->optimize();
  auto mpe = graph.eliminateSequential()->optimize();
  cout << "\nMost Probable Explanation (MPE):" << endl;
  print(mpe);

@@ -97,7 +97,7 @@ int main(int argc, char **argv) {

  // solve again, now with evidence
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues mpe_with_evidence = chordal->optimize();
  auto mpe_with_evidence = chordal->optimize();

  cout << "\nMPE given C=0:" << endl;
  print(mpe_with_evidence);

@@ -113,7 +113,7 @@ int main(int argc, char **argv) {
  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t i = 0; i < 10; i++) {
    DiscreteFactor::sharedValues sample = chordal->sample();
    auto sample = chordal->sample();
    print(sample);
  }
  return 0;

@@ -66,14 +66,14 @@ int main(int argc, char **argv) {
  chordal->print("Eliminated");

  // solve
  DiscreteFactor::sharedValues mpe = chordal->optimize();
  GTSAM_PRINT(*mpe);
  auto mpe = chordal->optimize();
  GTSAM_PRINT(mpe);

  // We can also sample from it
  cout << "\n10 samples:" << endl;
  for (size_t k = 0; k < 10; k++) {
    DiscreteFactor::sharedValues sample = chordal->sample();
    GTSAM_PRINT(*sample);
    auto sample = chordal->sample();
    GTSAM_PRINT(sample);
  }

  // Or compute the marginals. This re-eliminates the FG into a Bayes tree

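The pattern across these discrete-example updates is the same: optimize() and sample() now return assignments by value instead of through DiscreteFactor::sharedValues, so the pointer dereferences disappear. A minimal usage sketch of the new call style, assuming a factor graph and ordering already set up as in the examples above (the function name is illustrative):

    // Sketch of the post-change call style shown in the hunks above.
    #include <gtsam/base/Testable.h>
    #include <gtsam/discrete/DiscreteFactorGraph.h>
    #include <gtsam/inference/Ordering.h>

    using namespace gtsam;

    void solveAndSample(const DiscreteFactorGraph& fg, const Ordering& ordering) {
      DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential(ordering);

      // No pointer dereference: the MPE assignment is returned by value.
      auto mpe = chordal->optimize();
      GTSAM_PRINT(mpe);

      // Sampling follows the same value-returning convention.
      for (size_t i = 0; i < 10; i++) {
        auto sample = chordal->sample();
        GTSAM_PRINT(sample);
      }
    }
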
@@ -11,21 +11,23 @@

/**
 * @file IMUKittiExampleGPS
 * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI VISION BENCHMARK SUITE
 * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ Electronics
 * @brief Example of application of ISAM2 for GPS-aided navigation on the KITTI
 * VISION BENCHMARK SUITE
 * @author Ported by Thomas Jespersen (thomasj@tkjelectronics.dk), TKJ
 * Electronics
 */

// GTSAM related includes.
#include <gtsam/inference/Symbol.h>
#include <gtsam/navigation/CombinedImuFactor.h>
#include <gtsam/navigation/GPSFactor.h>
#include <gtsam/navigation/ImuFactor.h>
#include <gtsam/slam/dataset.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/ISAM2Params.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/dataset.h>

#include <cstring>
#include <fstream>

@@ -34,35 +36,35 @@
using namespace std;
using namespace gtsam;

using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)
using symbol_shorthand::V;  // Vel (xdot,ydot,zdot)
using symbol_shorthand::B;  // Bias (ax,ay,az,gx,gy,gz)
using symbol_shorthand::V;  // Vel (xdot,ydot,zdot)
using symbol_shorthand::X;  // Pose3 (x,y,z,r,p,y)

struct KittiCalibration {
  double body_ptx;
  double body_pty;
  double body_ptz;
  double body_prx;
  double body_pry;
  double body_prz;
  double accelerometer_sigma;
  double gyroscope_sigma;
  double integration_sigma;
  double accelerometer_bias_sigma;
  double gyroscope_bias_sigma;
  double average_delta_t;
};

struct ImuMeasurement {
  double time;
  double dt;
  Vector3 accelerometer;
  Vector3 gyroscope;  // omega
};

struct GpsMeasurement {
  double time;
  Vector3 position;  // x,y,z
};

const string output_filename = "IMUKittiExampleGPSResults.csv";

@@ -70,290 +72,313 @@ const string output_filename = "IMUKittiExampleGPSResults.csv";
void loadKittiData(KittiCalibration& kitti_calibration,
                   vector<ImuMeasurement>& imu_measurements,
                   vector<GpsMeasurement>& gps_measurements) {
  string line;

  // Read IMU metadata and compute relative sensor pose transforms
  // BodyPtx BodyPty BodyPtz BodyPrx BodyPry BodyPrz AccelerometerSigma
  // GyroscopeSigma IntegrationSigma AccelerometerBiasSigma GyroscopeBiasSigma
  // AverageDeltaT
  string imu_metadata_file =
      findExampleDataFile("KittiEquivBiasedImu_metadata.txt");
  ifstream imu_metadata(imu_metadata_file.c_str());

  printf("-- Reading sensor metadata\n");

  getline(imu_metadata, line, '\n');  // ignore the first line

  // Load Kitti calibration
  getline(imu_metadata, line, '\n');
  sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf",
         &kitti_calibration.body_ptx, &kitti_calibration.body_pty,
         &kitti_calibration.body_ptz, &kitti_calibration.body_prx,
         &kitti_calibration.body_pry, &kitti_calibration.body_prz,
         &kitti_calibration.accelerometer_sigma,
         &kitti_calibration.gyroscope_sigma,
         &kitti_calibration.integration_sigma,
         &kitti_calibration.accelerometer_bias_sigma,
         &kitti_calibration.gyroscope_bias_sigma,
         &kitti_calibration.average_delta_t);
  printf("IMU metadata: %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf\n",
         kitti_calibration.body_ptx, kitti_calibration.body_pty,
         kitti_calibration.body_ptz, kitti_calibration.body_prx,
         kitti_calibration.body_pry, kitti_calibration.body_prz,
         kitti_calibration.accelerometer_sigma,
         kitti_calibration.gyroscope_sigma, kitti_calibration.integration_sigma,
         kitti_calibration.accelerometer_bias_sigma,
         kitti_calibration.gyroscope_bias_sigma,
         kitti_calibration.average_delta_t);

  // Read IMU data
  // Time dt accelX accelY accelZ omegaX omegaY omegaZ
  string imu_data_file = findExampleDataFile("KittiEquivBiasedImu.txt");
  printf("-- Reading IMU measurements from file\n");
  {
    ifstream imu_data(imu_data_file.c_str());
    getline(imu_data, line, '\n');  // ignore the first line

    double time = 0, dt = 0, acc_x = 0, acc_y = 0, acc_z = 0, gyro_x = 0,
           gyro_y = 0, gyro_z = 0;
    while (!imu_data.eof()) {
      getline(imu_data, line, '\n');
      sscanf(line.c_str(), "%lf %lf %lf %lf %lf %lf %lf %lf", &time, &dt,
             &acc_x, &acc_y, &acc_z, &gyro_x, &gyro_y, &gyro_z);

      ImuMeasurement measurement;
      measurement.time = time;
      measurement.dt = dt;
      measurement.accelerometer = Vector3(acc_x, acc_y, acc_z);
      measurement.gyroscope = Vector3(gyro_x, gyro_y, gyro_z);
      imu_measurements.push_back(measurement);
    }
  }

  // Read GPS data
  // Time,X,Y,Z
  string gps_data_file = findExampleDataFile("KittiGps_converted.txt");
  printf("-- Reading GPS measurements from file\n");
  {
    ifstream gps_data(gps_data_file.c_str());
    getline(gps_data, line, '\n');  // ignore the first line

    double time = 0, gps_x = 0, gps_y = 0, gps_z = 0;
    while (!gps_data.eof()) {
      getline(gps_data, line, '\n');
      sscanf(line.c_str(), "%lf,%lf,%lf,%lf", &time, &gps_x, &gps_y, &gps_z);

      GpsMeasurement measurement;
      measurement.time = time;
      measurement.position = Vector3(gps_x, gps_y, gps_z);
      gps_measurements.push_back(measurement);
    }
  }
}

int main(int argc, char* argv[]) {
  KittiCalibration kitti_calibration;
  vector<ImuMeasurement> imu_measurements;
  vector<GpsMeasurement> gps_measurements;
  loadKittiData(kitti_calibration, imu_measurements, gps_measurements);

  Vector6 BodyP =
      (Vector6() << kitti_calibration.body_ptx, kitti_calibration.body_pty,
       kitti_calibration.body_ptz, kitti_calibration.body_prx,
       kitti_calibration.body_pry, kitti_calibration.body_prz)
          .finished();
  auto body_T_imu = Pose3::Expmap(BodyP);
  if (!body_T_imu.equals(Pose3(), 1e-5)) {
    printf(
        "Currently only support IMUinBody is identity, i.e. IMU and body frame "
        "are the same");
    exit(-1);
  }

  // Configure different variables
  // double t_offset = gps_measurements[0].time;
  size_t first_gps_pose = 1;
  size_t gps_skip = 10;  // Skip this many GPS measurements each time
  double g = 9.8;
  auto w_coriolis = Vector3::Zero();  // zero vector

  // Configure noise models
  auto noise_model_gps = noiseModel::Diagonal::Precisions(
      (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0 / 0.07))
          .finished());

  // Set initial conditions for the estimated trajectory
  // initial pose is the reference frame (navigation frame)
  auto current_pose_global =
      Pose3(Rot3(), gps_measurements[first_gps_pose].position);
  // the vehicle is stationary at the beginning at position 0,0,0
  Vector3 current_velocity_global = Vector3::Zero();
  auto current_bias = imuBias::ConstantBias();  // init with zero bias

  auto sigma_init_x = noiseModel::Diagonal::Precisions(
      (Vector6() << Vector3::Constant(0), Vector3::Constant(1.0)).finished());
  auto sigma_init_v = noiseModel::Diagonal::Sigmas(Vector3::Constant(1000.0));
  auto sigma_init_b = noiseModel::Diagonal::Sigmas(
      (Vector6() << Vector3::Constant(0.100), Vector3::Constant(5.00e-05))
          .finished());

  // Set IMU preintegration parameters
  Matrix33 measured_acc_cov =
      I_3x3 * pow(kitti_calibration.accelerometer_sigma, 2);
  Matrix33 measured_omega_cov =
      I_3x3 * pow(kitti_calibration.gyroscope_sigma, 2);
  // error committed in integrating position from velocities
  Matrix33 integration_error_cov =
      I_3x3 * pow(kitti_calibration.integration_sigma, 2);

  auto imu_params = PreintegratedImuMeasurements::Params::MakeSharedU(g);
  imu_params->accelerometerCovariance =
      measured_acc_cov;  // acc white noise in continuous
  imu_params->integrationCovariance =
      integration_error_cov;  // integration uncertainty continuous
  imu_params->gyroscopeCovariance =
      measured_omega_cov;  // gyro white noise in continuous
  imu_params->omegaCoriolis = w_coriolis;

  std::shared_ptr<PreintegratedImuMeasurements> current_summarized_measurement =
      nullptr;

  // Set ISAM2 parameters and create ISAM2 solver object
  ISAM2Params isam_params;
  isam_params.factorization = ISAM2Params::CHOLESKY;
  isam_params.relinearizeSkip = 10;

  ISAM2 isam(isam_params);

  // Create the factor graph and values object that will store new factors and
  // values to add to the incremental graph
  NonlinearFactorGraph new_factors;
  Values new_values;  // values storing the initial estimates of new nodes in
                      // the factor graph

  /// Main loop:
  /// (1) we read the measurements
  /// (2) we create the corresponding factors in the graph
  /// (3) we solve the graph to obtain and optimal estimate of robot trajectory
  printf(
      "-- Starting main loop: inference is performed at each time step, but we "
      "plot trajectory every 10 steps\n");
  size_t j = 0;
  size_t included_imu_measurement_count = 0;

  for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
    // At each non=IMU measurement we initialize a new node in the graph
    auto current_pose_key = X(i);
    auto current_vel_key = V(i);
    auto current_bias_key = B(i);
    double t = gps_measurements[i].time;

    if (i == first_gps_pose) {
      // Create initial estimate and prior on initial pose, velocity, and biases
      new_values.insert(current_pose_key, current_pose_global);
      new_values.insert(current_vel_key, current_velocity_global);
      new_values.insert(current_bias_key, current_bias);
      new_factors.emplace_shared<PriorFactor<Pose3>>(
          current_pose_key, current_pose_global, sigma_init_x);
      new_factors.emplace_shared<PriorFactor<Vector3>>(
          current_vel_key, current_velocity_global, sigma_init_v);
      new_factors.emplace_shared<PriorFactor<imuBias::ConstantBias>>(
          current_bias_key, current_bias, sigma_init_b);
    } else {
      double t_previous = gps_measurements[i - 1].time;

      // Summarize IMU data between the previous GPS measurement and now
      current_summarized_measurement =
          std::make_shared<PreintegratedImuMeasurements>(imu_params,
                                                         current_bias);

      while (j < imu_measurements.size() && imu_measurements[j].time <= t) {
        if (imu_measurements[j].time >= t_previous) {
          current_summarized_measurement->integrateMeasurement(
              imu_measurements[j].accelerometer, imu_measurements[j].gyroscope,
              imu_measurements[j].dt);
          included_imu_measurement_count++;
        }
        j++;
      }

      // Create IMU factor
      auto previous_pose_key = X(i - 1);
      auto previous_vel_key = V(i - 1);
      auto previous_bias_key = B(i - 1);

      new_factors.emplace_shared<ImuFactor>(
          previous_pose_key, previous_vel_key, current_pose_key,
          current_vel_key, previous_bias_key, *current_summarized_measurement);

      // Bias evolution as given in the IMU metadata
      auto sigma_between_b = noiseModel::Diagonal::Sigmas(
          (Vector6() << Vector3::Constant(
               sqrt(included_imu_measurement_count) *
               kitti_calibration.accelerometer_bias_sigma),
           Vector3::Constant(sqrt(included_imu_measurement_count) *
                             kitti_calibration.gyroscope_bias_sigma))
              .finished());
      new_factors.emplace_shared<BetweenFactor<imuBias::ConstantBias>>(
          previous_bias_key, current_bias_key, imuBias::ConstantBias(),
          sigma_between_b);

      // Create GPS factor
      auto gps_pose =
          Pose3(current_pose_global.rotation(), gps_measurements[i].position);
      if ((i % gps_skip) == 0) {
        new_factors.emplace_shared<PriorFactor<Pose3>>(
            current_pose_key, gps_pose, noise_model_gps);
        new_values.insert(current_pose_key, gps_pose);

        printf("############ POSE INCLUDED AT TIME %.6lf ############\n",
               t);
        cout << gps_pose.translation();
        printf("\n\n");
      } else {
        new_values.insert(current_pose_key, current_pose_global);
      }

      // Add initial values for velocity and bias based on the previous
      // estimates
      new_values.insert(current_vel_key, current_velocity_global);
      new_values.insert(current_bias_key, current_bias);

      // Update solver
      // =======================================================================
      // We accumulate 2*GPSskip GPS measurements before updating the solver at
      // first so that the heading becomes observable.
      if (i > (first_gps_pose + 2 * gps_skip)) {
        printf("############ NEW FACTORS AT TIME %.6lf ############\n",
               t);
        new_factors.print();

        isam.update(new_factors, new_values);

        // Reset the newFactors and newValues list
        new_factors.resize(0);
        new_values.clear();

        // Extract the result/current estimates
        Values result = isam.calculateEstimate();

        current_pose_global = result.at<Pose3>(current_pose_key);
        current_velocity_global = result.at<Vector3>(current_vel_key);
        current_bias = result.at<imuBias::ConstantBias>(current_bias_key);

        printf("\n############ POSE AT TIME %lf ############\n", t);
        current_pose_global.print();
        printf("\n\n");
      }
    }
  }

  // Save results to file
  printf("\nWriting results to file...\n");
  FILE* fp_out = fopen(output_filename.c_str(), "w+");
  fprintf(fp_out,
          "#time(s),x(m),y(m),z(m),qx,qy,qz,qw,gt_x(m),gt_y(m),gt_z(m)\n");

  Values result = isam.calculateEstimate();
  for (size_t i = first_gps_pose; i < gps_measurements.size() - 1; i++) {
    auto pose_key = X(i);
    auto vel_key = V(i);
    auto bias_key = B(i);

    auto pose = result.at<Pose3>(pose_key);
    auto velocity = result.at<Vector3>(vel_key);
    auto bias = result.at<imuBias::ConstantBias>(bias_key);

    auto pose_quat = pose.rotation().toQuaternion();
    auto gps = gps_measurements[i].position;

    cout << "State at #" << i << endl;
    cout << "Pose:" << endl << pose << endl;
    cout << "Velocity:" << endl << velocity << endl;
    cout << "Bias:" << endl << bias << endl;

    fprintf(fp_out, "%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",
            gps_measurements[i].time, pose.x(), pose.y(), pose.z(),
            pose_quat.x(), pose_quat.y(), pose_quat.z(), pose_quat.w(), gps(0),
            gps(1), gps(2));
  }

  fclose(fp_out);
}

@@ -70,8 +70,8 @@ int main(int argc, char** argv) {
  // "Decoding", i.e., configuration with largest value
  // We use sequential variable elimination
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
  optimalDecoding->print("\nMost Probable Explanation (optimalDecoding)\n");
  auto optimalDecoding = chordal->optimize();
  optimalDecoding.print("\nMost Probable Explanation (optimalDecoding)\n");

  // "Inference" Computing marginals for each node
  // Here we'll make use of DiscreteMarginals class, which makes use of

@@ -63,8 +63,8 @@ int main(int argc, char** argv) {
  // "Decoding", i.e., configuration with largest value (MPE)
  // We use sequential variable elimination
  DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
  DiscreteFactor::sharedValues optimalDecoding = chordal->optimize();
  optimalDecoding->print("\noptimalDecoding");
  auto optimalDecoding = chordal->optimize();
  GTSAM_PRINT(optimalDecoding);

  // "Inference" Computing marginals
  cout << "\nComputing Node Marginals .." << endl;

@@ -370,4 +370,4 @@ public:
 * the gtsam namespace to be more easily enforced as testable
 */
#define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup<T>;
#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup<T> _gtsam_IsLieGroup_##T;
#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup<T>;

@@ -178,4 +178,4 @@ struct FixedDimension {
// * the gtsam namespace to be more easily enforced as testable
// */
#define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold<T>;
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold<T> _gtsam_IsManifold_##T;
#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold<T>;

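With the alias form, invocations of these concept-check macros no longer need a trailing semicolon (the testDefaultChart hunk further below makes exactly that change). A small illustrative sketch, with Rot3 standing in for any GTSAM Lie group type and gtsam/base/Lie.h assumed to be where the macro lives:

    #include <gtsam/base/Lie.h>
    #include <gtsam/geometry/Rot3.h>

    using gtsam::Rot3;

    // Expands to: using _gtsam_IsLieGroup_Rot3 = gtsam::IsLieGroup<Rot3>;
    // so no trailing ';' is required at the call site anymore.
    GTSAM_CONCEPT_LIE_TYPE(Rot3)
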
@@ -46,28 +46,28 @@ typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> M
// Create handy typedefs and constants for square-size matrices
// MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9
#define GTSAM_MAKE_MATRIX_DEFS(N) \
typedef Eigen::Matrix<double, N, N> Matrix##N; \
typedef Eigen::Matrix<double, 1, N> Matrix1##N; \
typedef Eigen::Matrix<double, 2, N> Matrix2##N; \
typedef Eigen::Matrix<double, 3, N> Matrix3##N; \
typedef Eigen::Matrix<double, 4, N> Matrix4##N; \
typedef Eigen::Matrix<double, 5, N> Matrix5##N; \
typedef Eigen::Matrix<double, 6, N> Matrix6##N; \
typedef Eigen::Matrix<double, 7, N> Matrix7##N; \
typedef Eigen::Matrix<double, 8, N> Matrix8##N; \
typedef Eigen::Matrix<double, 9, N> Matrix9##N; \
using Matrix##N = Eigen::Matrix<double, N, N>; \
using Matrix1##N = Eigen::Matrix<double, 1, N>; \
using Matrix2##N = Eigen::Matrix<double, 2, N>; \
using Matrix3##N = Eigen::Matrix<double, 3, N>; \
using Matrix4##N = Eigen::Matrix<double, 4, N>; \
using Matrix5##N = Eigen::Matrix<double, 5, N>; \
using Matrix6##N = Eigen::Matrix<double, 6, N>; \
using Matrix7##N = Eigen::Matrix<double, 7, N>; \
using Matrix8##N = Eigen::Matrix<double, 8, N>; \
using Matrix9##N = Eigen::Matrix<double, 9, N>; \
static const Eigen::MatrixBase<Matrix##N>::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \
static const Eigen::MatrixBase<Matrix##N>::ConstantReturnType Z_##N##x##N = Matrix##N::Zero();

GTSAM_MAKE_MATRIX_DEFS(1);
GTSAM_MAKE_MATRIX_DEFS(2);
GTSAM_MAKE_MATRIX_DEFS(3);
GTSAM_MAKE_MATRIX_DEFS(4);
GTSAM_MAKE_MATRIX_DEFS(5);
GTSAM_MAKE_MATRIX_DEFS(6);
GTSAM_MAKE_MATRIX_DEFS(7);
GTSAM_MAKE_MATRIX_DEFS(8);
GTSAM_MAKE_MATRIX_DEFS(9);
GTSAM_MAKE_MATRIX_DEFS(1)
GTSAM_MAKE_MATRIX_DEFS(2)
GTSAM_MAKE_MATRIX_DEFS(3)
GTSAM_MAKE_MATRIX_DEFS(4)
GTSAM_MAKE_MATRIX_DEFS(5)
GTSAM_MAKE_MATRIX_DEFS(6)
GTSAM_MAKE_MATRIX_DEFS(7)
GTSAM_MAKE_MATRIX_DEFS(8)
GTSAM_MAKE_MATRIX_DEFS(9)

// Matrix expressions for accessing parts of matrices
typedef Eigen::Block<Matrix> SubMatrix;

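Because the macro now declares aliases and the invocations below it drop their trailing semicolons, each GTSAM_MAKE_MATRIX_DEFS(N) call expands to a self-terminated block. As an illustration only, the N = 4 instantiation is roughly equivalent to:

    // Equivalent expansion sketch for GTSAM_MAKE_MATRIX_DEFS(4) (abridged).
    #include <Eigen/Core>

    using Matrix4  = Eigen::Matrix<double, 4, 4>;
    using Matrix14 = Eigen::Matrix<double, 1, 4>;
    using Matrix24 = Eigen::Matrix<double, 2, 4>;
    // ... Matrix34 through Matrix94 follow the same pattern ...
    static const Eigen::MatrixBase<Matrix4>::IdentityReturnType I_4x4 = Matrix4::Identity();
    static const Eigen::MatrixBase<Matrix4>::ConstantReturnType Z_4x4 = Matrix4::Zero();
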
@@ -173,4 +173,4 @@ namespace gtsam {
 * @deprecated please use BOOST_CONCEPT_ASSERT and
 */
#define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable<T>;
#define GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef gtsam::IsTestable<T> _gtsam_Testable_##T;
#define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable<T>;

@@ -85,7 +85,7 @@ bool assert_equal(const V& expected, const boost::optional<const V&>& actual, do
 * \deprecated: use container equals instead
 */
template<class V>
bool assert_equal(const std::vector<V>& expected, const std::vector<V>& actual, double tol = 1e-9) {
bool GTSAM_DEPRECATED assert_equal(const std::vector<V>& expected, const std::vector<V>& actual, double tol = 1e-9) {
  bool match = true;
  if (expected.size() != actual.size())
    match = false;

@@ -48,18 +48,18 @@ static const Eigen::MatrixBase<Vector3>::ConstantReturnType Z_3x1 = Vector3::Zer
// Create handy typedefs and constants for vectors with N>3
// VectorN and Z_Nx1, for N=1..9
#define GTSAM_MAKE_VECTOR_DEFS(N) \
typedef Eigen::Matrix<double, N, 1> Vector##N; \
using Vector##N = Eigen::Matrix<double, N, 1>; \
static const Eigen::MatrixBase<Vector##N>::ConstantReturnType Z_##N##x1 = Vector##N::Zero();

GTSAM_MAKE_VECTOR_DEFS(4);
GTSAM_MAKE_VECTOR_DEFS(5);
GTSAM_MAKE_VECTOR_DEFS(6);
GTSAM_MAKE_VECTOR_DEFS(7);
GTSAM_MAKE_VECTOR_DEFS(8);
GTSAM_MAKE_VECTOR_DEFS(9);
GTSAM_MAKE_VECTOR_DEFS(10);
GTSAM_MAKE_VECTOR_DEFS(11);
GTSAM_MAKE_VECTOR_DEFS(12);
GTSAM_MAKE_VECTOR_DEFS(4)
GTSAM_MAKE_VECTOR_DEFS(5)
GTSAM_MAKE_VECTOR_DEFS(6)
GTSAM_MAKE_VECTOR_DEFS(7)
GTSAM_MAKE_VECTOR_DEFS(8)
GTSAM_MAKE_VECTOR_DEFS(9)
GTSAM_MAKE_VECTOR_DEFS(10)
GTSAM_MAKE_VECTOR_DEFS(11)
GTSAM_MAKE_VECTOR_DEFS(12)

typedef Eigen::VectorBlock<Vector> SubVector;
typedef Eigen::VectorBlock<const Vector> ConstSubVector;

@@ -207,14 +207,14 @@ inline double inner_prod(const V1 &a, const V2& b) {
 * BLAS Level 1 scal: x <- alpha*x
 * \deprecated: use operators instead
 */
inline void scal(double alpha, Vector& x) { x *= alpha; }
inline void GTSAM_DEPRECATED scal(double alpha, Vector& x) { x *= alpha; }

/**
 * BLAS Level 1 axpy: y <- alpha*x + y
 * \deprecated: use operators instead
 */
template<class V1, class V2>
inline void axpy(double alpha, const V1& x, V2& y) {
inline void GTSAM_DEPRECATED axpy(double alpha, const V1& x, V2& y) {
  assert (y.size()==x.size());
  y += alpha * x;
}

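Both helpers are now marked GTSAM_DEPRECATED because plain Eigen expressions do the same job; the testVector change further below applies exactly this substitution. A small sketch of the operator forms (the function name is illustrative):

    #include <gtsam/base/Vector.h>

    void updateInPlace(gtsam::Vector& x, gtsam::Vector& y, double alpha) {
      // scal(alpha, x)  ->  scale in place with operator*=
      x *= alpha;
      // axpy(alpha, x, y)  ->  fused multiply-add with operator+=
      y += alpha * x;
    }
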
@@ -32,7 +32,7 @@ void testDefaultChart(TestResult& result_,
                      const std::string& name_,
                      const T& value) {

  GTSAM_CONCEPT_TESTABLE_TYPE(T);
  GTSAM_CONCEPT_TESTABLE_TYPE(T)

  typedef typename gtsam::DefaultChart<T> Chart;
  typedef typename Chart::vector Vector;

@@ -19,8 +19,9 @@

#pragma once

#include <sstream>
#include <Eigen/Core>
#include <fstream>
#include <sstream>
#include <string>

// includes for standard serialization types

@@ -40,6 +41,17 @@
#include <boost/archive/binary_oarchive.hpp>
#include <boost/serialization/export.hpp>

// Workaround a bug in GCC >= 7 and C++17
// ref. https://gitlab.com/libeigen/eigen/-/issues/1676
#ifdef __GNUC__
#if __GNUC__ >= 7 && __cplusplus >= 201703L
namespace boost { namespace serialization { struct U; } }
namespace Eigen { namespace internal {
template<> struct traits<boost::serialization::U> {enum {Flags=0};};
} }
#endif
#endif

namespace gtsam {

/** @name Standard serialization

@@ -220,8 +220,8 @@ TEST(Vector, axpy )
  Vector x = Vector3(10., 20., 30.);
  Vector y0 = Vector3(2.0, 5.0, 6.0);
  Vector y1 = y0, y2 = y0;
  axpy(0.1,x,y1);
  axpy(0.1,x,y2.head(3));
  y1 += 0.1 * x;
  y2.head(3) += 0.1 * x;
  Vector expected = Vector3(3.0, 7.0, 9.0);
  EXPECT(assert_equal(expected,y1));
  EXPECT(assert_equal(expected,Vector(y2)));

@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
|
|||
// Typedefs
|
||||
typedef typename FOREST::Node Node;
|
||||
|
||||
tbb::task::spawn_root_and_wait(
|
||||
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
|
||||
visitorPost, problemSizeThreshold));
|
||||
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
|
||||
visitorPost, problemSizeThreshold);
|
||||
#else
|
||||
DepthFirstForest(forest, rootData, visitorPre, visitorPost);
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@
|
|||
#include <boost/make_shared.hpp>
|
||||
|
||||
#ifdef GTSAM_USE_TBB
|
||||
#include <tbb/task.h> // tbb::task, tbb::task_list
|
||||
#include <tbb/task_group.h> // tbb::task_group
|
||||
#include <tbb/scalable_allocator.h> // tbb::scalable_allocator
|
||||
|
||||
namespace gtsam {
|
||||
|
|
@ -34,7 +34,7 @@ namespace gtsam {
|
|||
|
||||
/* ************************************************************************* */
|
||||
template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
|
||||
class PreOrderTask : public tbb::task
|
||||
class PreOrderTask
|
||||
{
|
||||
public:
|
||||
const boost::shared_ptr<NODE>& treeNode;
|
||||
|
|
@ -42,28 +42,30 @@ namespace gtsam {
|
|||
VISITOR_PRE& visitorPre;
|
||||
VISITOR_POST& visitorPost;
|
||||
int problemSizeThreshold;
|
||||
tbb::task_group& tg;
|
||||
bool makeNewTasks;
|
||||
|
||||
bool isPostOrderPhase;
|
||||
// Keep track of order phase across multiple calls to the same functor
|
||||
mutable bool isPostOrderPhase;
|
||||
|
||||
PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
|
||||
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
|
||||
bool makeNewTasks = true)
|
||||
tbb::task_group& tg, bool makeNewTasks = true)
|
||||
: treeNode(treeNode),
|
||||
myData(myData),
|
||||
visitorPre(visitorPre),
|
||||
visitorPost(visitorPost),
|
||||
problemSizeThreshold(problemSizeThreshold),
|
||||
tg(tg),
|
||||
makeNewTasks(makeNewTasks),
|
||||
isPostOrderPhase(false) {}
|
||||
|
||||
tbb::task* execute() override
|
||||
void operator()() const
|
||||
{
|
||||
if(isPostOrderPhase)
|
||||
{
|
||||
// Run the post-order visitor since this task was recycled to run the post-order visitor
|
||||
(void) visitorPost(treeNode, *myData);
|
||||
return nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
@ -71,14 +73,10 @@ namespace gtsam {
|
|||
{
|
||||
if(!treeNode->children.empty())
|
||||
{
|
||||
// Allocate post-order task as a continuation
|
||||
isPostOrderPhase = true;
|
||||
recycle_as_continuation();
|
||||
|
||||
bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);
|
||||
|
||||
tbb::task* firstChild = 0;
|
||||
tbb::task_list childTasks;
|
||||
// If we have child tasks, start subtasks and wait for them to complete
|
||||
tbb::task_group ctg;
|
||||
for(const boost::shared_ptr<NODE>& child: treeNode->children)
|
||||
{
|
||||
// Process child in a subtask. Important: Run visitorPre before calling
|
||||
|
|
@ -86,37 +84,30 @@ namespace gtsam {
|
|||
// allocated an extra child, this causes a TBB error.
|
||||
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
|
||||
tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
|
||||
tbb::task* childTask =
|
||||
new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
|
||||
problemSizeThreshold, overThreshold);
|
||||
if (firstChild)
|
||||
childTasks.push_back(*childTask);
|
||||
else
|
||||
firstChild = childTask;
|
||||
ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
|
||||
problemSizeThreshold, ctg, overThreshold));
|
||||
}
|
||||
ctg.wait();
|
||||
|
||||
// If we have child tasks, start subtasks and wait for them to complete
|
||||
set_ref_count((int)treeNode->children.size());
|
||||
spawn(childTasks);
|
||||
return firstChild;
|
||||
// Allocate post-order task as a continuation
|
||||
isPostOrderPhase = true;
|
||||
tg.run(*this);
|
||||
}
|
||||
else
|
||||
{
|
||||
// Run the post-order visitor in this task if we have no children
|
||||
(void) visitorPost(treeNode, *myData);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
// Process this node and its children in this task
|
||||
processNodeRecursively(treeNode, *myData);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData)
|
||||
void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
|
||||
{
|
||||
for(const boost::shared_ptr<NODE>& child: node->children)
|
||||
{
|
||||
|
|
@ -131,7 +122,7 @@ namespace gtsam {
|
|||
|
||||
/* ************************************************************************* */
|
||||
template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
|
||||
class RootTask : public tbb::task
|
||||
class RootTask
|
||||
{
|
||||
public:
|
||||
const ROOTS& roots;
|
||||
|
|
@ -139,38 +130,31 @@ namespace gtsam {
    VISITOR_PRE& visitorPre;
    VISITOR_POST& visitorPost;
    int problemSizeThreshold;
    tbb::task_group& tg;
    RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
      int problemSizeThreshold) :
      int problemSizeThreshold, tbb::task_group& tg) :
      roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
      problemSizeThreshold(problemSizeThreshold) {}
      problemSizeThreshold(problemSizeThreshold), tg(tg) {}

    tbb::task* execute() override
    void operator()() const
    {
      typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
      // Create data and tasks for our children
      tbb::task_list tasks;
      for(const boost::shared_ptr<NODE>& root: roots)
      {
        boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
        tasks.push_back(*new(allocate_child())
          PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
        tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
      }
      // Set TBB ref count
      set_ref_count(1 + (int) roots.size());
      // Spawn tasks
      spawn_and_wait_for_all(tasks);
      // Return nullptr
      return nullptr;
    }
  };

  template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
  RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>&
  CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
  void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
  {
    typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
    return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold);
  }
    tbb::task_group tg;
    tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
  }

}
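The traversal above replaces the removed tbb::task / task_list / allocate_root machinery with tbb::task_group: children are enqueued with tg.run(...) and the caller blocks once at the top with run_and_wait / wait. A self-contained sketch of that pattern outside GTSAM (illustrative only; the recursive split, depth, and function name are made up):

#include <tbb/task_group.h>
#include <iostream>

// Recursively spawn child work on a shared task_group, mirroring how
// RootTask/PreOrderTask now enqueue children with tg.run(...).
void visit(int depth, tbb::task_group& tg) {
  if (depth == 0) return;                            // leaf: no children to spawn
  for (int child = 0; child < 2; ++child)
    tg.run([depth, &tg] { visit(depth - 1, tg); });  // child task
}

int main() {
  tbb::task_group tg;
  // Root task: run_and_wait blocks until every task spawned on tg has finished,
  // including tasks that the children themselves add while running.
  tg.run_and_wait([&tg] { visit(3, tg); });
  std::cout << "traversal done" << std::endl;
  return 0;
}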
@ -34,6 +34,14 @@
|
|||
#include <tbb/scalable_allocator.h>
|
||||
#endif
|
||||
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#define GTSAM_DEPRECATED __attribute__((deprecated))
|
||||
#elif defined(_MSC_VER)
|
||||
#define GTSAM_DEPRECATED __declspec(deprecated)
|
||||
#else
|
||||
#define GTSAM_DEPRECATED
|
||||
#endif
|
||||
|
||||
#ifdef GTSAM_USE_EIGEN_MKL_OPENMP
|
||||
#include <omp.h>
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -80,3 +80,6 @@
|
|||
|
||||
// Whether to use the system installed Metis instead of the provided one
|
||||
#cmakedefine GTSAM_USE_SYSTEM_METIS
|
||||
|
||||
// Toggle switch for BetweenFactor jacobian computation
|
||||
#cmakedefine GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
|
||||
|
|
|
|||
|
|
@ -54,20 +54,20 @@ namespace gtsam {
  }

  /* ************************************************************************* */
  DiscreteFactor::sharedValues DiscreteBayesNet::optimize() const {
  DiscreteFactor::Values DiscreteBayesNet::optimize() const {
    // solve each node in turn in topological sort order (parents first)
    DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
    DiscreteFactor::Values result;
    for (auto conditional: boost::adaptors::reverse(*this))
      conditional->solveInPlace(*result);
      conditional->solveInPlace(&result);
    return result;
  }

  /* ************************************************************************* */
  DiscreteFactor::sharedValues DiscreteBayesNet::sample() const {
  DiscreteFactor::Values DiscreteBayesNet::sample() const {
    // sample each node in turn in topological sort order (parents first)
    DiscreteFactor::sharedValues result(new DiscreteFactor::Values());
    DiscreteFactor::Values result;
    for (auto conditional: boost::adaptors::reverse(*this))
      conditional->sampleInPlace(*result);
      conditional->sampleInPlace(&result);
    return result;
  }
@ -83,10 +83,10 @@ namespace gtsam {
    /**
     * Solve the DiscreteBayesNet by back-substitution
     */
    DiscreteFactor::sharedValues optimize() const;
    DiscreteFactor::Values optimize() const;

    /** Do ancestral sampling */
    DiscreteFactor::sharedValues sample() const;
    DiscreteFactor::Values sample() const;

    ///@}
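With optimize() and sample() now returning DiscreteFactor::Values by value, call sites drop the shared pointer and the dereference. A hedged usage sketch (the function name, keys, and probability tables below are invented for illustration):

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/discrete/DiscreteBayesNet.h>

void discreteExample() {
  using namespace gtsam;
  DiscreteKey Rain(0, 2), Sprinkler(1, 2);       // two hypothetical binary variables

  DiscreteFactorGraph fg;
  fg.add(Rain, "0.8 0.2");                       // unary factor on Rain
  fg.add(Rain & Sprinkler, "0.6 0.4 0.9 0.1");   // joint table over (Rain, Sprinkler)

  DiscreteBayesNet::shared_ptr chordal = fg.eliminateSequential();

  // Before: DiscreteFactor::sharedValues mpe = chordal->optimize(); then (*mpe)[...]
  DiscreteFactor::Values mpe = chordal->optimize();    // returned by value now
  size_t rainValue = mpe[Rain.first];                  // plain Assignment<Key> lookup

  DiscreteFactor::Values sampled = chordal->sample();  // same change for sampling
  (void)rainValue;
  (void)sampled;
}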
@ -35,7 +35,7 @@ using namespace std;
|
|||
namespace gtsam {
|
||||
|
||||
// Instantiate base class
|
||||
template class Conditional<DecisionTreeFactor, DiscreteConditional> ;
|
||||
template class GTSAM_EXPORT Conditional<DecisionTreeFactor, DiscreteConditional> ;
|
||||
|
||||
/* ******************************************************************************** */
|
||||
DiscreteConditional::DiscreteConditional(const size_t nrFrontals,
|
||||
|
|
@ -117,9 +117,9 @@ Potentials::ADT DiscreteConditional::choose(const Values& parentsValues) const {
|
|||
}
|
||||
|
||||
/* ******************************************************************************** */
|
||||
void DiscreteConditional::solveInPlace(Values& values) const {
|
||||
void DiscreteConditional::solveInPlace(Values* values) const {
|
||||
// TODO: Abhijit asks: is this really the fastest way? He thinks it is.
|
||||
ADT pFS = choose(values); // P(F|S=parentsValues)
|
||||
ADT pFS = choose(*values); // P(F|S=parentsValues)
|
||||
|
||||
// Initialize
|
||||
Values mpe;
|
||||
|
|
@ -145,16 +145,16 @@ void DiscreteConditional::solveInPlace(Values& values) const {
|
|||
|
||||
//set values (inPlace) to mpe
|
||||
for(Key j: frontals()) {
|
||||
values[j] = mpe[j];
|
||||
(*values)[j] = mpe[j];
|
||||
}
|
||||
}
|
||||
|
||||
/* ******************************************************************************** */
|
||||
void DiscreteConditional::sampleInPlace(Values& values) const {
|
||||
void DiscreteConditional::sampleInPlace(Values* values) const {
|
||||
assert(nrFrontals() == 1);
|
||||
Key j = (firstFrontalKey());
|
||||
size_t sampled = sample(values); // Sample variable
|
||||
values[j] = sampled; // store result in partial solution
|
||||
size_t sampled = sample(*values); // Sample variable given parents
|
||||
(*values)[j] = sampled; // store result in partial solution
|
||||
}
|
||||
|
||||
/* ******************************************************************************** */
|
||||
|
|
|
|||
|
|
@ -45,7 +45,6 @@ public:
|
|||
/** A map from keys to values..
|
||||
* TODO: Again, do we need this??? */
|
||||
typedef Assignment<Key> Values;
|
||||
typedef boost::shared_ptr<Values> sharedValues;
|
||||
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
|
@ -133,10 +132,10 @@ public:
|
|||
/// @{
|
||||
|
||||
/// solve a conditional, in place
|
||||
void solveInPlace(Values& parentsValues) const;
|
||||
void solveInPlace(Values* parentsValues) const;
|
||||
|
||||
/// sample in place, stores result in partial solution
|
||||
void sampleInPlace(Values& parentsValues) const;
|
||||
void sampleInPlace(Values* parentsValues) const;
|
||||
|
||||
/// @}
|
||||
|
||||
|
|
|
|||
|
|
@ -51,7 +51,6 @@ public:
|
|||
* the new class DiscreteValue, as the variable's type (domain)
|
||||
*/
|
||||
typedef Assignment<Key> Values;
|
||||
typedef boost::shared_ptr<Values> sharedValues;
|
||||
|
||||
public:
|
||||
|
||||
|
|
|
|||
|
|
@ -94,7 +94,7 @@ namespace gtsam {
|
|||
// }
|
||||
|
||||
/* ************************************************************************* */
|
||||
DiscreteFactor::sharedValues DiscreteFactorGraph::optimize() const
|
||||
DiscreteFactorGraph::Values DiscreteFactorGraph::optimize() const
|
||||
{
|
||||
gttic(DiscreteFactorGraph_optimize);
|
||||
return BaseEliminateable::eliminateSequential()->optimize();
|
||||
|
|
|
|||
|
|
@ -74,7 +74,6 @@ public:
|
|||
/** A map from keys to values */
|
||||
typedef KeyVector Indices;
|
||||
typedef Assignment<Key> Values;
|
||||
typedef boost::shared_ptr<Values> sharedValues;
|
||||
|
||||
/** Default constructor */
|
||||
DiscreteFactorGraph() {}
|
||||
|
|
@ -101,25 +100,27 @@ public:
|
|||
|
||||
/// @}
|
||||
|
||||
template<class SOURCE>
|
||||
// Add single key decision-tree factor.
|
||||
template <class SOURCE>
|
||||
void add(const DiscreteKey& j, SOURCE table) {
|
||||
DiscreteKeys keys;
|
||||
keys.push_back(j);
|
||||
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
|
||||
emplace_shared<DecisionTreeFactor>(keys, table);
|
||||
}
|
||||
|
||||
template<class SOURCE>
|
||||
// Add binary key decision-tree factor.
|
||||
template <class SOURCE>
|
||||
void add(const DiscreteKey& j1, const DiscreteKey& j2, SOURCE table) {
|
||||
DiscreteKeys keys;
|
||||
keys.push_back(j1);
|
||||
keys.push_back(j2);
|
||||
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
|
||||
emplace_shared<DecisionTreeFactor>(keys, table);
|
||||
}
|
||||
|
||||
/** add shared discreteFactor immediately from arguments */
|
||||
template<class SOURCE>
|
||||
// Add shared discreteFactor immediately from arguments.
|
||||
template <class SOURCE>
|
||||
void add(const DiscreteKeys& keys, SOURCE table) {
|
||||
push_back(boost::make_shared<DecisionTreeFactor>(keys, table));
|
||||
emplace_shared<DecisionTreeFactor>(keys, table);
|
||||
}
|
||||
|
||||
/** Return the set of variables involved in the factors (set union) */
|
||||
|
|
@ -140,7 +141,7 @@ public:
|
|||
* the dense elimination function specified in \c function,
|
||||
* followed by back-substitution resulting from elimination. Is equivalent
|
||||
* to calling graph.eliminateSequential()->optimize(). */
|
||||
DiscreteFactor::sharedValues optimize() const;
|
||||
Values optimize() const;
|
||||
|
||||
|
||||
// /** Permute the variables in the factors */
|
||||
|
|
|
|||
|
|
@ -104,12 +104,12 @@ TEST(DiscreteBayesNet, Asia) {
|
|||
EXPECT(assert_equal(expected2, *chordal->back()));
|
||||
|
||||
// solve
|
||||
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
||||
auto actualMPE = chordal->optimize();
|
||||
DiscreteFactor::Values expectedMPE;
|
||||
insert(expectedMPE)(Asia.first, 0)(Dyspnea.first, 0)(XRay.first, 0)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 0)(Either.first, 0)(
|
||||
LungCancer.first, 0)(Bronchitis.first, 0);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
|
||||
// add evidence, we were in Asia and we have dyspnea
|
||||
fg.add(Asia, "0 1");
|
||||
|
|
@ -117,12 +117,12 @@ TEST(DiscreteBayesNet, Asia) {
|
|||
|
||||
// solve again, now with evidence
|
||||
DiscreteBayesNet::shared_ptr chordal2 = fg.eliminateSequential(ordering);
|
||||
DiscreteFactor::sharedValues actualMPE2 = chordal2->optimize();
|
||||
auto actualMPE2 = chordal2->optimize();
|
||||
DiscreteFactor::Values expectedMPE2;
|
||||
insert(expectedMPE2)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 0)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 0)(
|
||||
LungCancer.first, 0)(Bronchitis.first, 1);
|
||||
EXPECT(assert_equal(expectedMPE2, *actualMPE2));
|
||||
EXPECT(assert_equal(expectedMPE2, actualMPE2));
|
||||
|
||||
// now sample from it
|
||||
DiscreteFactor::Values expectedSample;
|
||||
|
|
@ -130,8 +130,8 @@ TEST(DiscreteBayesNet, Asia) {
|
|||
insert(expectedSample)(Asia.first, 1)(Dyspnea.first, 1)(XRay.first, 1)(
|
||||
Tuberculosis.first, 0)(Smoking.first, 1)(Either.first, 1)(
|
||||
LungCancer.first, 1)(Bronchitis.first, 0);
|
||||
DiscreteFactor::sharedValues actualSample = chordal2->sample();
|
||||
EXPECT(assert_equal(expectedSample, *actualSample));
|
||||
auto actualSample = chordal2->sample();
|
||||
EXPECT(assert_equal(expectedSample, actualSample));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -169,8 +169,8 @@ TEST( DiscreteFactorGraph, test)
|
|||
// Test optimization
|
||||
DiscreteFactor::Values expectedValues;
|
||||
insert(expectedValues)(0, 0)(1, 0)(2, 0);
|
||||
DiscreteFactor::sharedValues actualValues = graph.optimize();
|
||||
EXPECT(assert_equal(expectedValues, *actualValues));
|
||||
auto actualValues = graph.optimize();
|
||||
EXPECT(assert_equal(expectedValues, actualValues));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -186,11 +186,11 @@ TEST( DiscreteFactorGraph, testMPE)
|
|||
// graph.product().print();
|
||||
// DiscreteSequentialSolver(graph).eliminate()->print();
|
||||
|
||||
DiscreteFactor::sharedValues actualMPE = graph.optimize();
|
||||
auto actualMPE = graph.optimize();
|
||||
|
||||
DiscreteFactor::Values expectedMPE;
|
||||
insert(expectedMPE)(0, 0)(1, 1)(2, 1);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -216,8 +216,8 @@ TEST( DiscreteFactorGraph, testMPE_Darwiche09book_p244)
|
|||
|
||||
// Use the solver machinery.
|
||||
DiscreteBayesNet::shared_ptr chordal = graph.eliminateSequential();
|
||||
DiscreteFactor::sharedValues actualMPE = chordal->optimize();
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
auto actualMPE = chordal->optimize();
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
// DiscreteConditional::shared_ptr root = chordal->back();
|
||||
// EXPECT_DOUBLES_EQUAL(0.4, (*root)(*actualMPE), 1e-9);
|
||||
|
||||
|
|
@ -244,8 +244,8 @@ ETree::shared_ptr eTree = ETree::Create(graph, structure);
|
|||
// eliminate normally and check solution
|
||||
DiscreteBayesNet::shared_ptr bayesNet = eTree->eliminate(&EliminateDiscrete);
|
||||
// bayesNet->print(">>>>>>>>>>>>>> Bayes Net <<<<<<<<<<<<<<<<<<");
|
||||
DiscreteFactor::sharedValues actualMPE = optimize(*bayesNet);
|
||||
EXPECT(assert_equal(expectedMPE, *actualMPE));
|
||||
auto actualMPE = optimize(*bayesNet);
|
||||
EXPECT(assert_equal(expectedMPE, actualMPE));
|
||||
|
||||
// Approximate and check solution
|
||||
// DiscreteBayesNet::shared_ptr approximateNet = eTree->approximate();
|
||||
|
|
|
|||
|
|
@ -46,9 +46,9 @@ double Cal3Fisheye::Scaling(double r) {
/* ************************************************************************* */
Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1,
                                OptionalJacobian<2, 2> H2) const {
  const double xi = p.x(), yi = p.y();
  const double xi = p.x(), yi = p.y(), zi = 1;
  const double r2 = xi * xi + yi * yi, r = sqrt(r2);
  const double t = atan(r);
  const double t = atan2(r, zi);
  const double t2 = t * t, t4 = t2 * t2, t6 = t2 * t4, t8 = t4 * t4;
  Vector5 K, T;
  K << 1, k1_, k2_, k3_, k4_;
@ -76,28 +76,32 @@ Point2 Cal3Fisheye::uncalibrate(const Point2& p, OptionalJacobian<2, 9> H1,

  // Derivative for points in intrinsic coords (2 by 2)
  if (H2) {
    const double dtd_dt =
        1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8;
    const double dt_dr = 1 / (1 + r2);
    const double rinv = 1 / r;
    const double dr_dxi = xi * rinv;
    const double dr_dyi = yi * rinv;
    const double dtd_dxi = dtd_dt * dt_dr * dr_dxi;
    const double dtd_dyi = dtd_dt * dt_dr * dr_dyi;
    if (r2==0) {
      *H2 = DK;
    } else {
      const double dtd_dt =
          1 + 3 * k1_ * t2 + 5 * k2_ * t4 + 7 * k3_ * t6 + 9 * k4_ * t8;
      const double R2 = r2 + zi*zi;
      const double dt_dr = zi / R2;
      const double rinv = 1 / r;
      const double dr_dxi = xi * rinv;
      const double dr_dyi = yi * rinv;
      const double dtd_dr = dtd_dt * dt_dr;

      const double c2 = dr_dxi * dr_dxi;
      const double s2 = dr_dyi * dr_dyi;
      const double cs = dr_dxi * dr_dyi;

    const double td = t * K.dot(T);
    const double rrinv = 1 / r2;
    const double dxd_dxi =
        dtd_dxi * dr_dxi + td * rinv - td * xi * rrinv * dr_dxi;
    const double dxd_dyi = dtd_dyi * dr_dxi - td * xi * rrinv * dr_dyi;
    const double dyd_dxi = dtd_dxi * dr_dyi - td * yi * rrinv * dr_dxi;
    const double dyd_dyi =
        dtd_dyi * dr_dyi + td * rinv - td * yi * rrinv * dr_dyi;
      const double dxd_dxi = dtd_dr * c2 + s * (1 - c2);
      const double dxd_dyi = (dtd_dr - s) * cs;
      const double dyd_dxi = dxd_dyi;
      const double dyd_dyi = dtd_dr * s2 + s * (1 - s2);

    Matrix2 DR;
    DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi;
      Matrix2 DR;
      DR << dxd_dxi, dxd_dyi, dyd_dxi, dyd_dyi;

    *H2 = DK * DR;
      *H2 = DK * DR;
    }
  }

  return uv;
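A common way to sanity-check the analytic point Jacobian above is to compare it against gtsam's numerical differentiation helper. A minimal test-style sketch (illustrative only; the function name, calibration values, and test point are made up):

#include <gtsam/geometry/Cal3Fisheye.h>
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/base/Matrix.h>

// Compare the analytic H2 (d uncalibrate / d point) with a numerical derivative.
bool checkFisheyePointJacobian() {
  using namespace gtsam;
  Cal3Fisheye K(500.0, 500.0, 0.0, 320.0, 240.0,   // fx, fy, s, u0, v0 (made up)
                -0.01, 0.02, -0.03, 0.04);         // k1..k4 (made up)
  Point2 p(0.1, 0.2);                              // intrinsic coordinates

  Matrix29 H1;
  Matrix2 H2;
  K.uncalibrate(p, H1, H2);

  Matrix expectedH2 = numericalDerivative22<Point2, Cal3Fisheye, Point2>(
      [](const Cal3Fisheye& cal, const Point2& pt) { return cal.uncalibrate(pt); },
      K, p);
  return assert_equal(expectedH2, Matrix(H2), 1e-7);
}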
|
|||
|
|
@ -312,6 +312,16 @@ public:
|
|||
return range(camera.pose(), Dcamera, Dother);
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
return K_.K() * PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4);
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Point2>::dimension,1>::Constant(2.0 * K_.fx());;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
/** Serialization function */
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ class GTSAM_EXPORT PinholeBaseK: public PinholeBase {
|
|||
|
||||
private:
|
||||
|
||||
GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION);
|
||||
GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
|
||||
|
||||
// Get dimensions of calibration type at compile time
|
||||
static const int DimK = FixedDimension<CALIBRATION>::value;
|
||||
|
|
@ -121,6 +121,13 @@ public:
|
|||
return _project(pw, Dpose, Dpoint, Dcal);
|
||||
}
|
||||
|
||||
/// project a 3D point from world coordinates into the image
|
||||
Point2 reprojectionError(const Point3& pw, const Point2& measured, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none,
|
||||
OptionalJacobian<2, DimK> Dcal = boost::none) const {
|
||||
return Point2(_project(pw, Dpose, Dpoint, Dcal) - measured);
|
||||
}
|
||||
|
||||
/// project a point at infinity from world coordinates into the image
|
||||
Point2 project(const Unit3& pw, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none,
|
||||
|
|
@ -159,7 +166,6 @@ public:
|
|||
return result;
|
||||
}
|
||||
|
||||
|
||||
/// backproject a 2-dimensional point to a 3-dimensional point at infinity
|
||||
Unit3 backprojectPointAtInfinity(const Point2& p) const {
|
||||
const Point2 pn = calibration().calibrate(p);
|
||||
|
|
@ -410,6 +416,16 @@ public:
|
|||
return PinholePose(); // assumes that the default constructor is valid
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
Matrix34 P = Matrix34(PinholeBase::pose().inverse().matrix().block(0, 0, 3, 4));
|
||||
return K_->K() * P;
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Point2>::dimension,1>::Constant(2.0 * K_->fx());;
|
||||
}
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
|
|
|||
|
|
@ -28,7 +28,7 @@ using namespace std;
|
|||
namespace gtsam {
|
||||
|
||||
/** instantiate concept checks */
|
||||
GTSAM_CONCEPT_POSE_INST(Pose2);
|
||||
GTSAM_CONCEPT_POSE_INST(Pose2)
|
||||
|
||||
static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.));
|
||||
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ using std::vector;
|
|||
using Point3Pairs = vector<Point3Pair>;
|
||||
|
||||
/** instantiate concept checks */
|
||||
GTSAM_CONCEPT_POSE_INST(Pose3);
|
||||
GTSAM_CONCEPT_POSE_INST(Pose3)
|
||||
|
||||
/* ************************************************************************* */
|
||||
Pose3::Pose3(const Pose2& pose2) :
|
||||
|
|
@ -63,6 +63,46 @@ Matrix6 Pose3::AdjointMap() const {
|
|||
return adj;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Calculate AdjointMap applied to xi_b, with Jacobians
|
||||
Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose,
|
||||
OptionalJacobian<6, 6> H_xib) const {
|
||||
const Matrix6 Ad = AdjointMap();
|
||||
|
||||
// Jacobians
|
||||
// D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b
|
||||
// D2 Ad_T(xi_b) = Ad_T
|
||||
// See docs/math.pdf for more details.
|
||||
// In D1 calculation, we could be more efficient by writing it out, but do not
|
||||
// for readability
|
||||
if (H_pose) *H_pose = -Ad * adjointMap(xi_b);
|
||||
if (H_xib) *H_xib = Ad;
|
||||
|
||||
return Ad * xi_b;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
/// The dual version of Adjoint
|
||||
Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose,
|
||||
OptionalJacobian<6, 6> H_x) const {
|
||||
const Matrix6 Ad = AdjointMap();
|
||||
const Vector6 AdTx = Ad.transpose() * x;
|
||||
|
||||
// Jacobians
|
||||
// See docs/math.pdf for more details.
|
||||
if (H_pose) {
|
||||
const auto w_T_hat = skewSymmetric(AdTx.head<3>()),
|
||||
v_T_hat = skewSymmetric(AdTx.tail<3>());
|
||||
*H_pose << w_T_hat, v_T_hat, //
|
||||
/* */ v_T_hat, Z_3x3;
|
||||
}
|
||||
if (H_x) {
|
||||
*H_x = Ad.transpose();
|
||||
}
|
||||
|
||||
return AdTx;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix6 Pose3::adjointMap(const Vector6& xi) {
|
||||
Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));
|
||||
|
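Pose3::Adjoint and Pose3::AdjointTranspose now expose optional Jacobians (H_pose and H_xib / H_x), following the OptionalJacobian pattern used elsewhere in gtsam. A brief sketch of how a caller might request and cross-check them (illustrative only; the function name, pose, and twist values are arbitrary):

#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/numericalDerivative.h>

void adjointJacobianDemo() {
  using namespace gtsam;
  const Pose3 T(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1.0, 2.0, 3.0));  // arbitrary pose
  const Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();

  Matrix6 H_pose, H_xib;
  const Vector6 xi_spatial = T.Adjoint(xi, H_pose, H_xib);  // Ad_T * xi, with Jacobians

  // H_xib is simply the adjoint map itself, as documented in the header.
  const Matrix6 expected_H_xib = T.AdjointMap();

  // H_pose can be cross-checked numerically:
  const Matrix6 numerical_H_pose = numericalDerivative21<Vector6, Pose3, Vector6>(
      [](const Pose3& pose, const Vector6& v) { return pose.Adjoint(v); }, T, xi);

  (void)xi_spatial; (void)expected_H_xib; (void)numerical_H_pose;
}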
|
@ -75,7 +115,7 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {
|
|||
|
||||
/* ************************************************************************* */
|
||||
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
||||
OptionalJacobian<6, 6> Hxi) {
|
||||
OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
|
||||
if (Hxi) {
|
||||
Hxi->setZero();
|
||||
for (int i = 0; i < 6; ++i) {
|
||||
|
|
@ -86,12 +126,14 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
|
|||
Hxi->col(i) = Gi * y;
|
||||
}
|
||||
}
|
||||
return adjointMap(xi) * y;
|
||||
const Matrix6& ad_xi = adjointMap(xi);
|
||||
if (H_y) *H_y = ad_xi;
|
||||
return ad_xi * y;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
||||
OptionalJacobian<6, 6> Hxi) {
|
||||
OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
|
||||
if (Hxi) {
|
||||
Hxi->setZero();
|
||||
for (int i = 0; i < 6; ++i) {
|
||||
|
|
@ -102,7 +144,9 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
|
|||
Hxi->col(i) = GTi * y;
|
||||
}
|
||||
}
|
||||
return adjointMap(xi).transpose() * y;
|
||||
const Matrix6& adT_xi = adjointMap(xi).transpose();
|
||||
if (H_y) *H_y = adT_xi;
|
||||
return adT_xi * y;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -145,15 +145,22 @@ public:
|
|||
* Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame
|
||||
* Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi)
|
||||
*/
|
||||
Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect
|
||||
Matrix6 AdjointMap() const;
|
||||
|
||||
/**
|
||||
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame
|
||||
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a
|
||||
* body-fixed velocity, transforming it to the spatial frame
|
||||
* \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$
|
||||
* Note that H_xib = AdjointMap()
|
||||
*/
|
||||
Vector6 Adjoint(const Vector6& xi_b) const {
|
||||
return AdjointMap() * xi_b;
|
||||
} /// FIXME Not tested - marked as incorrect
|
||||
Vector6 Adjoint(const Vector6& xi_b,
|
||||
OptionalJacobian<6, 6> H_this = boost::none,
|
||||
OptionalJacobian<6, 6> H_xib = boost::none) const;
|
||||
|
||||
/// The dual version of Adjoint
|
||||
Vector6 AdjointTranspose(const Vector6& x,
|
||||
OptionalJacobian<6, 6> H_this = boost::none,
|
||||
OptionalJacobian<6, 6> H_x = boost::none) const;
|
||||
|
||||
/**
|
||||
* Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11
|
||||
|
|
@ -170,13 +177,14 @@ public:
|
|||
* and its inverse transpose in the discrete Euler Poincare' (DEP) operator.
|
||||
*
|
||||
*/
|
||||
static Matrix6 adjointMap(const Vector6 &xi);
|
||||
static Matrix6 adjointMap(const Vector6& xi);
|
||||
|
||||
/**
|
||||
* Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
|
||||
*/
|
||||
static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
|
||||
OptionalJacobian<6, 6> Hxi = boost::none);
|
||||
static Vector6 adjoint(const Vector6& xi, const Vector6& y,
|
||||
OptionalJacobian<6, 6> Hxi = boost::none,
|
||||
OptionalJacobian<6, 6> H_y = boost::none);
|
||||
|
||||
// temporary fix for wrappers until case issue is resolved
|
||||
static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}
|
||||
|
|
@ -186,7 +194,8 @@ public:
|
|||
* The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
|
||||
*/
|
||||
static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
|
||||
OptionalJacobian<6, 6> Hxi = boost::none);
|
||||
OptionalJacobian<6, 6> Hxi = boost::none,
|
||||
OptionalJacobian<6, 6> H_y = boost::none);
|
||||
|
||||
/// Derivative of Expmap
|
||||
static Matrix6 ExpmapDerivative(const Vector6& xi);
|
||||
|
|
|
|||
|
|
@ -117,13 +117,23 @@ struct traits<QUATERNION_TYPE> {
|
|||
omega = (-8. / 3. - 2. / 3. * qw) * q.vec();
|
||||
} else {
|
||||
// Normal, away from zero case
|
||||
_Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw);
|
||||
// Important: convert to [-pi,pi] to keep error continuous
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * q.vec();
|
||||
if (qw > 0) {
|
||||
_Scalar angle = 2 * acos(qw), s = sqrt(1 - qw * qw);
|
||||
// Important: convert to [-pi,pi] to keep error continuous
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * q.vec();
|
||||
} else {
|
||||
// Make sure that we are using a canonical quaternion with w > 0
|
||||
_Scalar angle = 2 * acos(-qw), s = sqrt(1 - qw * qw);
|
||||
if (angle > M_PI)
|
||||
angle -= twoPi;
|
||||
else if (angle < -M_PI)
|
||||
angle += twoPi;
|
||||
omega = (angle / s) * -q.vec();
|
||||
}
|
||||
}
|
||||
|
||||
if(H) *H = SO3::LogmapDerivative(omega.template cast<double>());
|
||||
|
|
|
|||
|
|
@ -261,25 +261,59 @@ Vector3 SO3::Logmap(const SO3& Q, ChartJacobian H) {
|
|||
|
||||
// when trace == -1, i.e., when theta = +-pi, +-3pi, +-5pi, etc.
|
||||
// we do something special
|
||||
if (tr + 1.0 < 1e-10) {
|
||||
if (std::abs(R33 + 1.0) > 1e-5)
|
||||
omega = (M_PI / sqrt(2.0 + 2.0 * R33)) * Vector3(R13, R23, 1.0 + R33);
|
||||
else if (std::abs(R22 + 1.0) > 1e-5)
|
||||
omega = (M_PI / sqrt(2.0 + 2.0 * R22)) * Vector3(R12, 1.0 + R22, R32);
|
||||
else
|
||||
// if(std::abs(R.r1_.x()+1.0) > 1e-5) This is implicit
|
||||
omega = (M_PI / sqrt(2.0 + 2.0 * R11)) * Vector3(1.0 + R11, R21, R31);
|
||||
if (tr + 1.0 < 1e-3) {
|
||||
if (R33 > R22 && R33 > R11) {
|
||||
// R33 is the largest diagonal, a=3, b=1, c=2
|
||||
const double W = R21 - R12;
|
||||
const double Q1 = 2.0 + 2.0 * R33;
|
||||
const double Q2 = R31 + R13;
|
||||
const double Q3 = R23 + R32;
|
||||
const double r = sqrt(Q1);
|
||||
const double one_over_r = 1 / r;
|
||||
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
|
||||
const double sgn_w = W < 0 ? -1.0 : 1.0;
|
||||
const double mag = M_PI - (2 * sgn_w * W) / norm;
|
||||
const double scale = 0.5 * one_over_r * mag;
|
||||
omega = sgn_w * scale * Vector3(Q2, Q3, Q1);
|
||||
} else if (R22 > R11) {
|
||||
// R22 is the largest diagonal, a=2, b=3, c=1
|
||||
const double W = R13 - R31;
|
||||
const double Q1 = 2.0 + 2.0 * R22;
|
||||
const double Q2 = R23 + R32;
|
||||
const double Q3 = R12 + R21;
|
||||
const double r = sqrt(Q1);
|
||||
const double one_over_r = 1 / r;
|
||||
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
|
||||
const double sgn_w = W < 0 ? -1.0 : 1.0;
|
||||
const double mag = M_PI - (2 * sgn_w * W) / norm;
|
||||
const double scale = 0.5 * one_over_r * mag;
|
||||
omega = sgn_w * scale * Vector3(Q3, Q1, Q2);
|
||||
} else {
|
||||
// R11 is the largest diagonal, a=1, b=2, c=3
|
||||
const double W = R32 - R23;
|
||||
const double Q1 = 2.0 + 2.0 * R11;
|
||||
const double Q2 = R12 + R21;
|
||||
const double Q3 = R31 + R13;
|
||||
const double r = sqrt(Q1);
|
||||
const double one_over_r = 1 / r;
|
||||
const double norm = sqrt(Q1*Q1 + Q2*Q2 + Q3*Q3 + W*W);
|
||||
const double sgn_w = W < 0 ? -1.0 : 1.0;
|
||||
const double mag = M_PI - (2 * sgn_w * W) / norm;
|
||||
const double scale = 0.5 * one_over_r * mag;
|
||||
omega = sgn_w * scale * Vector3(Q1, Q2, Q3);
|
||||
}
|
||||
} else {
|
||||
double magnitude;
|
||||
const double tr_3 = tr - 3.0; // always negative
|
||||
if (tr_3 < -1e-7) {
|
||||
const double tr_3 = tr - 3.0; // could be non-negative if the matrix is off orthogonal
|
||||
if (tr_3 < -1e-6) {
|
||||
// this is the normal case -1 < trace < 3
|
||||
double theta = acos((tr - 1.0) / 2.0);
|
||||
magnitude = theta / (2.0 * sin(theta));
|
||||
} else {
|
||||
// when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0)
|
||||
// use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2)
|
||||
// see https://github.com/borglab/gtsam/issues/746 for details
|
||||
magnitude = 0.5 - tr_3 / 12.0;
|
||||
magnitude = 0.5 - tr_3 / 12.0 + tr_3*tr_3/60.0;
|
||||
}
|
||||
omega = magnitude * Vector3(R32 - R23, R13 - R31, R21 - R12);
|
||||
}
|
||||
|
|
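The rewritten near-pi branch of SO3::Logmap (trace close to -1) can be exercised with a simple round-trip: build a rotation whose angle is just below pi, take Logmap, and re-exponentiate. A small sketch (illustrative only; the function name, axis, and tolerance are arbitrary choices):

#include <gtsam/geometry/SO3.h>
#include <gtsam/base/Vector.h>
#include <cmath>

bool logmapRoundTripNearPi() {
  using namespace gtsam;
  // Axis-angle with magnitude just below pi, where trace(R) is close to -1.
  const Vector3 axis = Vector3(1.0, -2.0, 0.5).normalized();
  const Vector3 omega = (M_PI - 1e-7) * axis;

  const SO3 R = SO3::Expmap(omega);
  const Vector3 recovered = SO3::Logmap(R);

  // Expect the same rotation back; exactly at pi the sign of the axis may flip,
  // so compare rotations rather than the raw axis-angle vectors.
  return (SO3::Expmap(recovered).matrix() - R.matrix()).norm() < 1e-9;
}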
|
|||
|
|
@ -40,8 +40,8 @@ static Point3Pairs subtractCentroids(const Point3Pairs &abPointPairs,
|
|||
}
|
||||
|
||||
/// Form inner products x and y and calculate scale.
|
||||
static const double calculateScale(const Point3Pairs &d_abPointPairs,
|
||||
const Rot3 &aRb) {
|
||||
static double calculateScale(const Point3Pairs &d_abPointPairs,
|
||||
const Rot3 &aRb) {
|
||||
double x = 0, y = 0;
|
||||
Point3 da, db;
|
||||
for (const Point3Pair& d_abPair : d_abPointPairs) {
|
||||
|
|
|
|||
|
|
@ -0,0 +1,109 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SphericalCamera.h
|
||||
* @brief Calibrated camera with spherical projection
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
|
||||
using namespace std;
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
bool SphericalCamera::equals(const SphericalCamera& camera, double tol) const {
|
||||
return pose_.equals(camera.pose(), tol);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void SphericalCamera::print(const string& s) const { pose_.print(s + ".pose"); }
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Unit3, bool> SphericalCamera::projectSafe(const Point3& pw) const {
|
||||
const Point3 pc = pose().transformTo(pw);
|
||||
Unit3 pu = Unit3::FromPoint3(pc);
|
||||
return make_pair(pu, pc.norm() > 1e-8);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project2(const Point3& pw, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
Matrix36 Dtf_pose;
|
||||
Matrix3 Dtf_point; // calculated by transformTo if needed
|
||||
const Point3 pc =
|
||||
pose().transformTo(pw, Dpose ? &Dtf_pose : 0, Dpoint ? &Dtf_point : 0);
|
||||
|
||||
if (pc.norm() <= 1e-8) throw("point cannot be at the center of the camera");
|
||||
|
||||
Matrix23 Dunit; // calculated by FromPoint3 if needed
|
||||
Unit3 pu = Unit3::FromPoint3(Point3(pc), Dpoint ? &Dunit : 0);
|
||||
|
||||
if (Dpose) *Dpose = Dunit * Dtf_pose; // 2x3 * 3x6 = 2x6
|
||||
if (Dpoint) *Dpoint = Dunit * Dtf_point; // 2x3 * 3x3 = 2x3
|
||||
return pu;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 2> Dpoint) const {
|
||||
Matrix23 Dtf_rot;
|
||||
Matrix2 Dtf_point; // calculated by transformTo if needed
|
||||
const Unit3 pu = pose().rotation().unrotate(pwu, Dpose ? &Dtf_rot : 0,
|
||||
Dpoint ? &Dtf_point : 0);
|
||||
|
||||
if (Dpose)
|
||||
*Dpose << Dtf_rot, Matrix::Zero(2, 3); // 2x6 (translation part is zero)
|
||||
if (Dpoint) *Dpoint = Dtf_point; // 2x2
|
||||
return pu;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Point3 SphericalCamera::backproject(const Unit3& pu, const double depth) const {
|
||||
return pose().transformFrom(depth * pu);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::backprojectPointAtInfinity(const Unit3& p) const {
|
||||
return pose().rotation().rotate(p);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Unit3 SphericalCamera::project(const Point3& point,
|
||||
OptionalJacobian<2, 6> Dcamera,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
return project2(point, Dcamera, Dpoint);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector2 SphericalCamera::reprojectionError(
|
||||
const Point3& point, const Unit3& measured, OptionalJacobian<2, 6> Dpose,
|
||||
OptionalJacobian<2, 3> Dpoint) const {
|
||||
// project point
|
||||
if (Dpose || Dpoint) {
|
||||
Matrix26 H_project_pose;
|
||||
Matrix23 H_project_point;
|
||||
Matrix22 H_error;
|
||||
Unit3 projected = project2(point, H_project_pose, H_project_point);
|
||||
Vector2 error = measured.errorVector(projected, boost::none, H_error);
|
||||
if (Dpose) *Dpose = H_error * H_project_pose;
|
||||
if (Dpoint) *Dpoint = H_error * H_project_point;
|
||||
return error;
|
||||
} else {
|
||||
return measured.errorVector(project2(point, Dpose, Dpoint));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
} // namespace gtsam
|
||||
|
|
@ -0,0 +1,225 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SphericalCamera.h
|
||||
* @brief Calibrated camera with spherical projection
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/base/Manifold.h>
|
||||
#include <gtsam/base/ThreadsafeException.h>
|
||||
#include <gtsam/base/concepts.h>
|
||||
#include <gtsam/dllexport.h>
|
||||
#include <gtsam/geometry/BearingRange.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/geometry/Unit3.h>
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Empty calibration. Only needed to play well with other cameras
|
||||
* (e.g., when templating functions wrt cameras), since other cameras
|
||||
* have constructors in the form ‘camera(pose,calibration)’
|
||||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class GTSAM_EXPORT EmptyCal {
|
||||
public:
|
||||
enum { dimension = 0 };
|
||||
EmptyCal() {}
|
||||
virtual ~EmptyCal() = default;
|
||||
using shared_ptr = boost::shared_ptr<EmptyCal>;
|
||||
void print(const std::string& s) const {
|
||||
std::cout << "empty calibration: " << s << std::endl;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* A spherical camera class that has a Pose3 and measures bearing vectors.
|
||||
* The camera has an ‘Empty’ calibration and the only 6 dof are the pose
|
||||
* @addtogroup geometry
|
||||
* \nosubgrouping
|
||||
*/
|
||||
class GTSAM_EXPORT SphericalCamera {
|
||||
public:
|
||||
enum { dimension = 6 };
|
||||
|
||||
typedef Unit3 Measurement;
|
||||
typedef std::vector<Unit3> MeasurementVector;
|
||||
typedef EmptyCal CalibrationType;
|
||||
|
||||
private:
|
||||
Pose3 pose_; ///< 3D pose of camera
|
||||
|
||||
protected:
|
||||
EmptyCal::shared_ptr emptyCal_;
|
||||
|
||||
public:
|
||||
/// @}
|
||||
/// @name Standard Constructors
|
||||
/// @{
|
||||
|
||||
/// Default constructor
|
||||
SphericalCamera()
|
||||
: pose_(Pose3::identity()), emptyCal_(boost::make_shared<EmptyCal>()) {}
|
||||
|
||||
/// Constructor with pose
|
||||
explicit SphericalCamera(const Pose3& pose)
|
||||
: pose_(pose), emptyCal_(boost::make_shared<EmptyCal>()) {}
|
||||
|
||||
/// Constructor with empty intrinsics (needed for smart factors)
|
||||
explicit SphericalCamera(const Pose3& pose,
|
||||
const boost::shared_ptr<EmptyCal>& cal)
|
||||
: pose_(pose), emptyCal_(boost::make_shared<EmptyCal>()) {}
|
||||
|
||||
/// @}
|
||||
/// @name Advanced Constructors
|
||||
/// @{
|
||||
explicit SphericalCamera(const Vector& v) : pose_(Pose3::Expmap(v)) {}
|
||||
|
||||
/// Default destructor
|
||||
virtual ~SphericalCamera() = default;
|
||||
|
||||
/// return shared pointer to calibration
|
||||
const boost::shared_ptr<EmptyCal>& sharedCalibration() const {
|
||||
return emptyCal_;
|
||||
}
|
||||
|
||||
/// return calibration
|
||||
const EmptyCal& calibration() const { return *emptyCal_; }
|
||||
|
||||
/// @}
|
||||
/// @name Testable
|
||||
/// @{
|
||||
|
||||
/// assert equality up to a tolerance
|
||||
bool equals(const SphericalCamera& camera, double tol = 1e-9) const;
|
||||
|
||||
/// print
|
||||
virtual void print(const std::string& s = "SphericalCamera") const;
|
||||
|
||||
/// @}
|
||||
/// @name Standard Interface
|
||||
/// @{
|
||||
|
||||
/// return pose, constant version
|
||||
const Pose3& pose() const { return pose_; }
|
||||
|
||||
/// get rotation
|
||||
const Rot3& rotation() const { return pose_.rotation(); }
|
||||
|
||||
/// get translation
|
||||
const Point3& translation() const { return pose_.translation(); }
|
||||
|
||||
// /// return pose, with derivative
|
||||
// const Pose3& getPose(OptionalJacobian<6, 6> H) const;
|
||||
|
||||
/// @}
|
||||
/// @name Transformations and measurement functions
|
||||
/// @{
|
||||
|
||||
/// Project a point into the image and check depth
|
||||
std::pair<Unit3, bool> projectSafe(const Point3& pw) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project2(const Point3& pw, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D direction in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project2(const Unit3& pwu, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 2> Dpoint = boost::none) const;
|
||||
|
||||
/// backproject a 2-dimensional point to a 3-dimensional point at given depth
|
||||
Point3 backproject(const Unit3& p, const double depth) const;
|
||||
|
||||
/// backproject point at infinity
|
||||
Unit3 backprojectPointAtInfinity(const Unit3& p) const;
|
||||
|
||||
/** Project point into the image
|
||||
* (note: there is no CheiralityException for a spherical camera)
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the intrinsic coordinates of the projected point
|
||||
*/
|
||||
Unit3 project(const Point3& point, OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
|
||||
/** Compute reprojection error for a given 3D point in world coordinates
|
||||
* @param point 3D point in world coordinates
|
||||
* @return the tangent space error between the projection and the measurement
|
||||
*/
|
||||
Vector2 reprojectionError(const Point3& point, const Unit3& measured,
|
||||
OptionalJacobian<2, 6> Dpose = boost::none,
|
||||
OptionalJacobian<2, 3> Dpoint = boost::none) const;
|
||||
/// @}
|
||||
|
||||
/// move a camera according to d
|
||||
SphericalCamera retract(const Vector6& d) const {
|
||||
return SphericalCamera(pose().retract(d));
|
||||
}
|
||||
|
||||
/// return canonical coordinate
|
||||
Vector6 localCoordinates(const SphericalCamera& p) const {
|
||||
return pose().localCoordinates(p.pose());
|
||||
}
|
||||
|
||||
/// for Canonical
|
||||
static SphericalCamera identity() {
|
||||
return SphericalCamera(
|
||||
Pose3::identity()); // assumes that the default constructor is valid
|
||||
}
|
||||
|
||||
/// for Linear Triangulation
|
||||
Matrix34 cameraProjectionMatrix() const {
|
||||
return Matrix34(pose_.inverse().matrix().block(0, 0, 3, 4));
|
||||
}
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double, traits<Point2>::dimension, 1>::Constant(0.0);
|
||||
}
|
||||
|
||||
/// @deprecated
|
||||
size_t dim() const { return 6; }
|
||||
|
||||
/// @deprecated
|
||||
static size_t Dim() { return 6; }
|
||||
|
||||
private:
|
||||
/** Serialization function */
|
||||
friend class boost::serialization::access;
|
||||
template <class Archive>
|
||||
void serialize(Archive& ar, const unsigned int /*version*/) {
|
||||
ar& BOOST_SERIALIZATION_NVP(pose_);
|
||||
}
|
||||
};
|
||||
// end of class SphericalCamera
|
||||
|
||||
template <>
|
||||
struct traits<SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
|
||||
template <>
|
||||
struct traits<const SphericalCamera> : public internal::LieGroup<Pose3> {};
|
||||
|
||||
} // namespace gtsam
|
||||
|
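To see how the new SphericalCamera fits the usual camera concept, here is a brief usage sketch: construct it from a pose, project a world point to a bearing, compute the tangent-space reprojection error, and backproject at a chosen depth (illustrative only; the function name, pose, landmark, and depth are arbitrary):

#include <gtsam/geometry/SphericalCamera.h>

void sphericalCameraDemo() {
  using namespace gtsam;
  const Pose3 wTc(Rot3::Ypr(0.1, -0.2, 0.3), Point3(0.5, 0.0, 1.0));  // arbitrary camera pose
  const SphericalCamera camera(wTc);

  const Point3 landmark(2.0, 1.0, 4.0);

  // Bearing measurement: a Unit3 direction in the camera frame (no CheiralityException).
  Matrix26 Dpose;
  Matrix23 Dpoint;
  const Unit3 bearing = camera.project(landmark, Dpose, Dpoint);

  // Reprojection error lives in the 2D tangent space of the measured Unit3.
  const Vector2 error = camera.reprojectionError(landmark, bearing);

  // Backproject the bearing at a given depth to get a 3D point in world coordinates.
  const Point3 reconstructed = camera.backproject(bearing, 4.0);
  (void)error; (void)reconstructed;
}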
|
@ -170,6 +170,11 @@ public:
|
|||
OptionalJacobian<3, 3> H2 = boost::none, OptionalJacobian<3, 0> H3 =
|
||||
boost::none) const;
|
||||
|
||||
/// for Nonlinear Triangulation
|
||||
Vector defaultErrorWhenTriangulatingBehindCamera() const {
|
||||
return Eigen::Matrix<double,traits<Measurement>::dimension,1>::Constant(2.0 * K_->fx());;
|
||||
}
|
||||
|
||||
/// @}
|
||||
|
||||
private:
|
||||
|
|
|
|||
|
|
@ -72,5 +72,5 @@ private:
|
|||
|
||||
/** Pose Concept macros */
|
||||
#define GTSAM_CONCEPT_POSE_INST(T) template class gtsam::PoseConcept<T>;
|
||||
#define GTSAM_CONCEPT_POSE_TYPE(T) typedef gtsam::PoseConcept<T> _gtsam_PoseConcept##T;
|
||||
#define GTSAM_CONCEPT_POSE_TYPE(T) using _gtsam_PoseConcept##T = gtsam::PoseConcept<T>;
|
||||
|
||||
|
|
|
|||
|
|
@ -473,6 +473,9 @@ class Pose3 {
|
|||
Vector logmap(const gtsam::Pose3& pose);
|
||||
Matrix AdjointMap() const;
|
||||
Vector Adjoint(Vector xi) const;
|
||||
Vector AdjointTranspose(Vector xi) const;
|
||||
static Matrix adjointMap(Vector xi);
|
||||
static Vector adjoint(Vector xi, Vector y);
|
||||
static Matrix adjointMap_(Vector xi);
|
||||
static Vector adjoint_(Vector xi, Vector y);
|
||||
static Vector adjointTranspose(Vector xi, Vector y);
|
||||
|
|
|
|||
|
|
@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full)
|
|||
EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check Adjoint numerical derivatives
|
||||
TEST(Pose3, Adjoint_jacobians)
|
||||
{
|
||||
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
|
||||
|
||||
// Check evaluation sanity check
|
||||
EQUALITY(static_cast<gtsam::Vector>(T.AdjointMap() * xi), T.Adjoint(xi));
|
||||
EQUALITY(static_cast<gtsam::Vector>(T2.AdjointMap() * xi), T2.Adjoint(xi));
|
||||
EQUALITY(static_cast<gtsam::Vector>(T3.AdjointMap() * xi), T3.Adjoint(xi));
|
||||
|
||||
// Check jacobians
|
||||
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
|
||||
std::function<Vector6(const Pose3&, const Vector6&)> Adjoint_proxy =
|
||||
[&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); };
|
||||
|
||||
T.Adjoint(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi);
|
||||
expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
|
||||
T2.Adjoint(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi);
|
||||
expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
|
||||
T3.Adjoint(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi);
|
||||
expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check AdjointTranspose and jacobians
|
||||
TEST(Pose3, AdjointTranspose)
|
||||
{
|
||||
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
|
||||
|
||||
// Check evaluation
|
||||
EQUALITY(static_cast<Vector>(T.AdjointMap().transpose() * xi),
|
||||
T.AdjointTranspose(xi));
|
||||
EQUALITY(static_cast<Vector>(T2.AdjointMap().transpose() * xi),
|
||||
T2.AdjointTranspose(xi));
|
||||
EQUALITY(static_cast<Vector>(T3.AdjointMap().transpose() * xi),
|
||||
T3.AdjointTranspose(xi));
|
||||
|
||||
// Check jacobians
|
||||
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
|
||||
std::function<Vector6(const Pose3&, const Vector6&)> AdjointTranspose_proxy =
|
||||
[&](const Pose3& T, const Vector6& xi) {
|
||||
return T.AdjointTranspose(xi);
|
||||
};
|
||||
|
||||
T.AdjointTranspose(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi);
|
||||
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
|
||||
T2.AdjointTranspose(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi);
|
||||
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
|
||||
T3.AdjointTranspose(xi, actualH1, actualH2);
|
||||
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi);
|
||||
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi);
|
||||
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
|
||||
EXPECT(assert_equal(expectedH2, actualH2));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi))
|
||||
TEST(Pose3, Adjoint_hat)
|
||||
|
|
@ -837,16 +912,20 @@ Vector6 testDerivAdjoint(const Vector6& xi, const Vector6& v) {
|
|||
}
|
||||
|
||||
TEST( Pose3, adjoint) {
|
||||
Vector expected = testDerivAdjoint(screwPose3::xi, screwPose3::xi);
|
||||
Vector6 v = (Vector6() << 1, 2, 3, 4, 5, 6).finished();
|
||||
Vector expected = testDerivAdjoint(screwPose3::xi, v);
|
||||
|
||||
Matrix actualH;
|
||||
Vector actual = Pose3::adjoint(screwPose3::xi, screwPose3::xi, actualH);
|
||||
Matrix actualH1, actualH2;
|
||||
Vector actual = Pose3::adjoint(screwPose3::xi, v, actualH1, actualH2);
|
||||
|
||||
Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
|
||||
testDerivAdjoint, screwPose3::xi, screwPose3::xi, 1e-5);
|
||||
Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
|
||||
testDerivAdjoint, screwPose3::xi, v, 1e-5);
|
||||
Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
|
||||
testDerivAdjoint, screwPose3::xi, v, 1e-5);
|
||||
|
||||
EXPECT(assert_equal(expected,actual,1e-5));
|
||||
EXPECT(assert_equal(numericalH,actualH,1e-5));
|
||||
EXPECT(assert_equal(numericalH1,actualH1,1e-5));
|
||||
EXPECT(assert_equal(numericalH2,actualH2,1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -859,14 +938,17 @@ TEST( Pose3, adjointTranspose) {
|
|||
Vector v = (Vector(6) << 0.04, 0.05, 0.06, 4.0, 5.0, 6.0).finished();
|
||||
Vector expected = testDerivAdjointTranspose(xi, v);
|
||||
|
||||
Matrix actualH;
|
||||
Vector actual = Pose3::adjointTranspose(xi, v, actualH);
|
||||
Matrix actualH1, actualH2;
|
||||
Vector actual = Pose3::adjointTranspose(xi, v, actualH1, actualH2);
|
||||
|
||||
Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
|
||||
Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
|
||||
testDerivAdjointTranspose, xi, v, 1e-5);
|
||||
Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
|
||||
testDerivAdjointTranspose, xi, v, 1e-5);
|
||||
|
||||
EXPECT(assert_equal(expected,actual,1e-15));
|
||||
EXPECT(assert_equal(numericalH,actualH,1e-5));
|
||||
EXPECT(assert_equal(numericalH1,actualH1,1e-5));
|
||||
EXPECT(assert_equal(numericalH2,actualH2,1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
|
|
|
|||
|
|
@ -122,6 +122,21 @@ TEST( Rot3, AxisAngle)
|
|||
CHECK(assert_equal(expected,actual3,1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, AxisAngle2)
|
||||
{
|
||||
// constructor from a rotation matrix, as doubles in *row-major* order.
|
||||
Rot3 R1(-0.999957, 0.00922903, 0.00203116, 0.00926964, 0.999739, 0.0208927, -0.0018374, 0.0209105, -0.999781);
|
||||
|
||||
Unit3 actualAxis;
|
||||
double actualAngle;
|
||||
// convert Rot3 to quaternion using GTSAM
|
||||
std::tie(actualAxis, actualAngle) = R1.axisAngle();
|
||||
|
||||
double expectedAngle = 3.1396582;
|
||||
CHECK(assert_equal(expectedAngle, actualAngle, 1e-5));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( Rot3, Rodrigues)
|
||||
{
|
||||
|
|
@ -181,13 +196,13 @@ TEST( Rot3, retract)
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(Rot3, log) {
|
||||
TEST( Rot3, log) {
|
||||
static const double PI = boost::math::constants::pi<double>();
|
||||
Vector w;
|
||||
Rot3 R;
|
||||
|
||||
#define CHECK_OMEGA(X, Y, Z) \
|
||||
w = (Vector(3) << X, Y, Z).finished(); \
|
||||
w = (Vector(3) << (X), (Y), (Z)).finished(); \
|
||||
R = Rot3::Rodrigues(w); \
|
||||
EXPECT(assert_equal(w, Rot3::Logmap(R), 1e-12));
|
||||
|
||||
|
|
@ -219,17 +234,17 @@ TEST(Rot3, log) {
|
|||
CHECK_OMEGA(0, 0, PI)
|
||||
|
||||
// Windows and Linux have flipped sign in quaternion mode
|
||||
#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
|
||||
//#if !defined(__APPLE__) && defined(GTSAM_USE_QUATERNIONS)
|
||||
w = (Vector(3) << x * PI, y * PI, z * PI).finished();
|
||||
R = Rot3::Rodrigues(w);
|
||||
EXPECT(assert_equal(Vector(-w), Rot3::Logmap(R), 1e-12));
|
||||
#else
|
||||
CHECK_OMEGA(x * PI, y * PI, z * PI)
|
||||
#endif
|
||||
//#else
|
||||
// CHECK_OMEGA(x * PI, y * PI, z * PI)
|
||||
//#endif
|
||||
|
||||
// Check 360 degree rotations
|
||||
#define CHECK_OMEGA_ZERO(X, Y, Z) \
|
||||
w = (Vector(3) << X, Y, Z).finished(); \
|
||||
w = (Vector(3) << (X), (Y), (Z)).finished(); \
|
||||
R = Rot3::Rodrigues(w); \
|
||||
EXPECT(assert_equal((Vector)Z_3x1, Rot3::Logmap(R)));
|
||||
|
||||
|
|
@ -247,15 +262,15 @@ TEST(Rot3, log) {
|
|||
// Rot3's Logmap returns different, but equivalent compacted
|
||||
// axis-angle vectors depending on whether Rot3 is implemented
|
||||
// by Quaternions or SO3.
|
||||
#if defined(GTSAM_USE_QUATERNIONS)
|
||||
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
|
||||
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
|
||||
#if defined(GTSAM_USE_QUATERNIONS)
|
||||
// Quaternion bounds angle to [-pi, pi] resulting in ~179.9 degrees
|
||||
EXPECT(assert_equal(Vector3(0.264451979, -0.742197651, -3.04098211),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#else
|
||||
// SO3 will be approximate because of the non-orthogonality
|
||||
EXPECT(assert_equal(Vector3(0.264452, -0.742197708, -3.04098184),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#else
|
||||
// SO3 does not bound angle resulting in ~180.1 degrees
|
||||
EXPECT(assert_equal(Vector3(-0.264544406, 0.742217405, 3.04117314),
|
||||
(Vector)Rot3::Logmap(Rlund), 1e-8));
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
@ -0,0 +1,163 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SphericalCamera.h
|
||||
* @brief Calibrated camera with spherical projection
|
||||
* @date Aug 26, 2021
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/base/numericalDerivative.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
|
||||
#include <cmath>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std::placeholders;
|
||||
using namespace std;
|
||||
using namespace gtsam;
|
||||
|
||||
typedef SphericalCamera Camera;
|
||||
|
||||
// static const Cal3_S2 K(625, 625, 0, 0, 0);
|
||||
//
|
||||
static const Pose3 pose(Rot3(Vector3(1, -1, -1).asDiagonal()),
|
||||
Point3(0, 0, 0.5));
|
||||
static const Camera camera(pose);
|
||||
//
|
||||
static const Pose3 pose1(Rot3(), Point3(0, 1, 0.5));
|
||||
static const Camera camera1(pose1);
|
||||
|
||||
static const Point3 point1(-0.08, -0.08, 0.0);
|
||||
static const Point3 point2(-0.08, 0.08, 0.0);
|
||||
static const Point3 point3(0.08, 0.08, 0.0);
|
||||
static const Point3 point4(0.08, -0.08, 0.0);
|
||||
|
||||
// manually computed in matlab
|
||||
static const Unit3 bearing1(-0.156054862928174, 0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing2(-0.156054862928174, -0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing3(0.156054862928174, -0.156054862928174,
|
||||
0.975342893301088);
|
||||
static const Unit3 bearing4(0.156054862928174, 0.156054862928174,
|
||||
0.975342893301088);
|
||||
|
||||
static double depth = 0.512640224719052;
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, constructor) {
|
||||
EXPECT(assert_equal(pose, camera.pose()));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, project) {
|
||||
// expected from manual calculation in Matlab
|
||||
EXPECT(assert_equal(camera.project(point1), bearing1));
|
||||
EXPECT(assert_equal(camera.project(point2), bearing2));
|
||||
EXPECT(assert_equal(camera.project(point3), bearing3));
|
||||
EXPECT(assert_equal(camera.project(point4), bearing4));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, backproject) {
|
||||
EXPECT(assert_equal(camera.backproject(bearing1, depth), point1));
|
||||
EXPECT(assert_equal(camera.backproject(bearing2, depth), point2));
|
||||
EXPECT(assert_equal(camera.backproject(bearing3, depth), point3));
|
||||
EXPECT(assert_equal(camera.backproject(bearing4, depth), point4));
|
||||
}
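Together with the project test above, these assertions imply a project/backproject round trip at the tabulated depth. A minimal extra check in the same style (not part of this patch, just a sketch using the fixtures defined above) would be:

// Hypothetical: backprojecting the projected bearing at the known depth
// should recover the original landmark.
EXPECT(assert_equal(point1, camera.backproject(camera.project(point1), depth), 1e-9));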
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, backproject2) {
|
||||
Point3 origin(0, 0, 0);
|
||||
Rot3 rot(1., 0., 0., 0., 0., 1., 0., -1., 0.);  // a camera looking down
|
||||
Camera camera(Pose3(rot, origin));
|
||||
|
||||
Point3 actual = camera.backproject(Unit3(0, 0, 1), 1.);
|
||||
Point3 expected(0., 1., 0.);
|
||||
pair<Unit3, bool> x = camera.projectSafe(expected);
|
||||
|
||||
EXPECT(assert_equal(expected, actual));
|
||||
EXPECT(assert_equal(Unit3(0, 0, 1), x.first));
|
||||
EXPECT(x.second);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
static Unit3 project3(const Pose3& pose, const Point3& point) {
|
||||
return Camera(pose).project(point);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, Dproject) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Unit3 result = camera.project(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project3, pose, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose, point1);
|
||||
EXPECT(assert_equal(bearing1, result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
static Vector2 reprojectionError2(const Pose3& pose, const Point3& point,
|
||||
const Unit3& measured) {
|
||||
return Camera(pose).reprojectionError(point, measured);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, reprojectionError) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Vector2 result = camera.reprojectionError(point1, bearing1, Dpose, Dpoint);
|
||||
Matrix numerical_pose =
|
||||
numericalDerivative31(reprojectionError2, pose, point1, bearing1);
|
||||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing1);
|
||||
EXPECT(assert_equal(Vector2(0.0, 0.0), result));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST(SphericalCamera, reprojectionError_noisy) {
|
||||
Matrix Dpose, Dpoint;
|
||||
Unit3 bearing_noisy = bearing1.retract(Vector2(0.01, 0.05));
|
||||
Vector2 result =
|
||||
camera.reprojectionError(point1, bearing_noisy, Dpose, Dpoint);
|
||||
Matrix numerical_pose =
|
||||
numericalDerivative31(reprojectionError2, pose, point1, bearing_noisy);
|
||||
Matrix numerical_point =
|
||||
numericalDerivative32(reprojectionError2, pose, point1, bearing_noisy);
|
||||
EXPECT(assert_equal(Vector2(-0.050282, 0.00833482), result, 1e-5));
|
||||
EXPECT(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
EXPECT(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Add a test with more arbitrary rotation
|
||||
TEST(SphericalCamera, Dproject2) {
|
||||
static const Pose3 pose1(Rot3::Ypr(0.1, -0.1, 0.4), Point3(0, 0, -10));
|
||||
static const Camera camera(pose1);
|
||||
Matrix Dpose, Dpoint;
|
||||
camera.project2(point1, Dpose, Dpoint);
|
||||
Matrix numerical_pose = numericalDerivative21(project3, pose1, point1);
|
||||
Matrix numerical_point = numericalDerivative22(project3, pose1, point1);
|
||||
CHECK(assert_equal(numerical_pose, Dpose, 1e-7));
|
||||
CHECK(assert_equal(numerical_point, Dpoint, 1e-7));
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() {
|
||||
TestResult tr;
|
||||
return TestRegistry::runAllTests(tr);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
|
|
@ -10,22 +10,23 @@
|
|||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* testTriangulation.cpp
|
||||
*
|
||||
* Created on: July 30th, 2013
|
||||
* Author: cbeall3
|
||||
* @file testTriangulation.cpp
|
||||
* @brief triangulation utilities
|
||||
* @date July 30th, 2013
|
||||
* @author Chris Beall (cbeall3)
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/StereoCamera.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/Cal3Bundler.h>
|
||||
#include <gtsam/slam/StereoFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/ExpressionFactor.h>
|
||||
#include <CppUnitLite/TestHarness.h>
|
||||
|
||||
#include <gtsam/geometry/Cal3Bundler.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
#include <gtsam/geometry/StereoCamera.h>
|
||||
#include <gtsam/geometry/triangulation.h>
|
||||
#include <gtsam/nonlinear/ExpressionFactor.h>
|
||||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/slam/StereoFactor.h>
|
||||
|
||||
#include <boost/assign.hpp>
|
||||
#include <boost/assign/std/vector.hpp>
|
||||
|
|
@ -36,7 +37,7 @@ using namespace boost::assign;
|
|||
|
||||
// Some common constants
|
||||
|
||||
static const boost::shared_ptr<Cal3_S2> sharedCal = //
|
||||
static const boost::shared_ptr<Cal3_S2> sharedCal = //
|
||||
boost::make_shared<Cal3_S2>(1500, 1200, 0, 640, 480);
|
||||
|
||||
// Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
|
|
@ -57,8 +58,7 @@ Point2 z2 = camera2.project(landmark);
|
|||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation
|
||||
TEST( triangulation, twoPoses) {
|
||||
|
||||
TEST(triangulation, twoPoses) {
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
|
|
@ -69,36 +69,36 @@ TEST( triangulation, twoPoses) {
|
|||
|
||||
// 1. Test simple DLT, perfect in no noise situation
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
|
||||
// 2. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
|
||||
// 3. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 3. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
optimize = false;
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual3, 1e-4));
|
||||
|
||||
// 4. Now with optimization on
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19814), *actual4, 1e-4));
|
||||
}
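The recurring change in this file is that the calibration type is now passed to triangulatePoint3 as an explicit template argument instead of being deduced; in isolation the updated call pattern is simply:

// Same call as in the test above; poses, sharedCal, measurements, rank_tol
// and optimize are the objects used in the test.
boost::optional<Point3> point =
    triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, rank_tol, optimize);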
|
||||
|
||||
//******************************************************************************
|
||||
// Similar, but now with Bundler calibration
|
||||
TEST( triangulation, twoPosesBundler) {
|
||||
|
||||
boost::shared_ptr<Cal3Bundler> bundlerCal = //
|
||||
TEST(triangulation, twoPosesBundler) {
|
||||
boost::shared_ptr<Cal3Bundler> bundlerCal = //
|
||||
boost::make_shared<Cal3Bundler>(1500, 0, 0, 640, 480);
|
||||
PinholeCamera<Cal3Bundler> camera1(pose1, *bundlerCal);
|
||||
PinholeCamera<Cal3Bundler> camera2(pose2, *bundlerCal);
|
||||
|
|
@ -116,37 +116,38 @@ TEST( triangulation, twoPosesBundler) {
|
|||
bool optimize = true;
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-7));
|
||||
|
||||
// Add some noise and try again
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3Bundler>(poses, bundlerCal, measurements, rank_tol, optimize);
|
||||
EXPECT(assert_equal(Point3(4.995, 0.499167, 1.19847), *actual2, 1e-4));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, fourPoses) {
|
||||
TEST(triangulation, fourPoses) {
|
||||
vector<Pose3> poses;
|
||||
Point2Vector measurements;
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += z1, z2;
|
||||
|
||||
boost::optional<Point3> actual = triangulatePoint3(poses, sharedCal,
|
||||
measurements);
|
||||
boost::optional<Point3> actual =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(poses, sharedCal, measurements);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
|
||||
|
|
@ -157,13 +158,13 @@ TEST( triangulation, fourPoses) {
|
|||
poses += pose3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3(poses, sharedCal, measurements);
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt = triangulatePoint3(poses,
|
||||
sharedCal, measurements, 1e-9, true);
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
|
||||
|
|
@ -176,13 +177,13 @@ TEST( triangulation, fourPoses) {
|
|||
poses += pose4;
|
||||
measurements += Point2(400, 400);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationCheiralityException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationCheiralityException);
|
||||
#endif
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, fourPoses_distinct_Ks) {
|
||||
TEST(triangulation, fourPoses_distinct_Ks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
|
||||
|
|
@ -195,22 +196,23 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2> > cameras;
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
||||
cameras += camera1, camera2;
|
||||
measurements += z1, z2;
|
||||
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> actual = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
|
||||
// 2. Add some noise and try again: result should be ~ (4.995, 0.499167, 1.19814)
|
||||
// 2. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) += Point2(0.1, 0.5);
|
||||
measurements.at(1) += Point2(-0.2, 0.3);
|
||||
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-2));
|
||||
|
||||
// 3. Add a slightly rotated third camera above, again with measurement noise
|
||||
|
|
@ -222,13 +224,13 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
cameras += camera3;
|
||||
measurements += z3 + Point2(0.1, -0.1);
|
||||
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3(cameras, measurements);
|
||||
boost::optional<Point3> triangulated_3cameras = //
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras, 1e-2));
|
||||
|
||||
// Again with nonlinear optimization
|
||||
boost::optional<Point3> triangulated_3cameras_opt = triangulatePoint3(cameras,
|
||||
measurements, 1e-9, true);
|
||||
boost::optional<Point3> triangulated_3cameras_opt =
|
||||
triangulatePoint3<Cal3_S2>(cameras, measurements, 1e-9, true);
|
||||
EXPECT(assert_equal(landmark, *triangulated_3cameras_opt, 1e-2));
|
||||
|
||||
// 4. Test failure: Add a 4th camera facing the wrong way
|
||||
|
|
@ -241,13 +243,13 @@ TEST( triangulation, fourPoses_distinct_Ks) {
|
|||
|
||||
cameras += camera4;
|
||||
measurements += Point2(400, 400);
|
||||
CHECK_EXCEPTION(triangulatePoint3(cameras, measurements),
|
||||
TriangulationCheiralityException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(cameras, measurements),
|
||||
TriangulationCheiralityException);
|
||||
#endif
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, outliersAndFarLandmarks) {
|
||||
TEST(triangulation, outliersAndFarLandmarks) {
|
||||
Cal3_S2 K1(1500, 1200, 0, 640, 480);
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, K1);
|
||||
|
|
@ -260,24 +262,29 @@ TEST( triangulation, outliersAndFarLandmarks) {
|
|||
Point2 z1 = camera1.project(landmark);
|
||||
Point2 z2 = camera2.project(landmark);
|
||||
|
||||
CameraSet<PinholeCamera<Cal3_S2> > cameras;
|
||||
CameraSet<PinholeCamera<Cal3_S2>> cameras;
|
||||
Point2Vector measurements;
|
||||
|
||||
cameras += camera1, camera2;
|
||||
measurements += z1, z2;
|
||||
|
||||
double landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
TriangulationParameters params(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold
|
||||
TriangulationResult actual = triangulateSafe(cameras,measurements,params);
|
||||
double landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
TriangulationParameters params(
|
||||
1.0, false, landmarkDistanceThreshold); // all default except
|
||||
// landmarkDistanceThreshold
|
||||
TriangulationResult actual = triangulateSafe(cameras, measurements, params);
|
||||
EXPECT(assert_equal(landmark, *actual, 1e-2));
|
||||
EXPECT(actual.valid());
|
||||
|
||||
landmarkDistanceThreshold = 4; // landmark is farther than that
|
||||
TriangulationParameters params2(1.0, false, landmarkDistanceThreshold); // all default except landmarkDistanceThreshold
|
||||
actual = triangulateSafe(cameras,measurements,params2);
|
||||
landmarkDistanceThreshold = 4; // landmark is farther than that
|
||||
TriangulationParameters params2(
|
||||
1.0, false, landmarkDistanceThreshold); // all default except
|
||||
// landmarkDistanceThreshold
|
||||
actual = triangulateSafe(cameras, measurements, params2);
|
||||
EXPECT(actual.farPoint());
|
||||
|
||||
// 3. Add a slightly rotated third camera above with a wrong measurement (OUTLIER)
|
||||
// 3. Add a slightly rotated third camera above with a wrong measurement
|
||||
// (OUTLIER)
|
||||
Pose3 pose3 = pose1 * Pose3(Rot3::Ypr(0.1, 0.2, 0.1), Point3(0.1, -2, -.1));
|
||||
Cal3_S2 K3(700, 500, 0, 640, 480);
|
||||
PinholeCamera<Cal3_S2> camera3(pose3, K3);
|
||||
|
|
@ -286,21 +293,23 @@ TEST( triangulation, outliersAndFarLandmarks) {
|
|||
cameras += camera3;
|
||||
measurements += z3 + Point2(10, -10);
|
||||
|
||||
landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
double outlierThreshold = 100; // loose, the outlier is going to pass
|
||||
TriangulationParameters params3(1.0, false, landmarkDistanceThreshold,outlierThreshold);
|
||||
actual = triangulateSafe(cameras,measurements,params3);
|
||||
landmarkDistanceThreshold = 10; // landmark is closer than that
|
||||
double outlierThreshold = 100; // loose, the outlier is going to pass
|
||||
TriangulationParameters params3(1.0, false, landmarkDistanceThreshold,
|
||||
outlierThreshold);
|
||||
actual = triangulateSafe(cameras, measurements, params3);
|
||||
EXPECT(actual.valid());
|
||||
|
||||
// now set stricter threshold for outlier rejection
|
||||
outlierThreshold = 5; // tighter, the outlier is not going to pass
|
||||
TriangulationParameters params4(1.0, false, landmarkDistanceThreshold,outlierThreshold);
|
||||
actual = triangulateSafe(cameras,measurements,params4);
|
||||
outlierThreshold = 5; // tighter, the outlier is not going to pass
|
||||
TriangulationParameters params4(1.0, false, landmarkDistanceThreshold,
|
||||
outlierThreshold);
|
||||
actual = triangulateSafe(cameras, measurements, params4);
|
||||
EXPECT(actual.outlier());
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, twoIdenticalPoses) {
|
||||
TEST(triangulation, twoIdenticalPoses) {
|
||||
// create first camera. Looking along X-axis, 1 meter above ground plane (x-y)
|
||||
PinholeCamera<Cal3_S2> camera1(pose1, *sharedCal);
|
||||
|
||||
|
|
@ -313,12 +322,12 @@ TEST( triangulation, twoIdenticalPoses) {
|
|||
poses += pose1, pose1;
|
||||
measurements += z1, z1;
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, onePose) {
|
||||
TEST(triangulation, onePose) {
|
||||
// we expect this test to fail with a TriangulationUnderconstrainedException
|
||||
// because there's only one camera observation
|
||||
|
||||
|
|
@ -326,28 +335,26 @@ TEST( triangulation, onePose) {
|
|||
Point2Vector measurements;
|
||||
|
||||
poses += Pose3();
|
||||
measurements += Point2(0,0);
|
||||
measurements += Point2(0, 0);
|
||||
|
||||
CHECK_EXCEPTION(triangulatePoint3(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
CHECK_EXCEPTION(triangulatePoint3<Cal3_S2>(poses, sharedCal, measurements),
|
||||
TriangulationUnderconstrainedException);
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST( triangulation, StereotriangulateNonlinear ) {
|
||||
|
||||
auto stereoK = boost::make_shared<Cal3_S2Stereo>(1733.75, 1733.75, 0, 689.645, 508.835, 0.0699612);
|
||||
TEST(triangulation, StereotriangulateNonlinear) {
|
||||
auto stereoK = boost::make_shared<Cal3_S2Stereo>(1733.75, 1733.75, 0, 689.645,
|
||||
508.835, 0.0699612);
|
||||
|
||||
// two camera poses m1, m2
|
||||
Matrix4 m1, m2;
|
||||
m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779,
|
||||
0.592783835, -0.77156583, 0.230856632, 66.2186159,
|
||||
0.116517574, -0.201470143, -0.9725393, -4.28382528,
|
||||
0, 0, 0, 1;
|
||||
m1 << 0.796888717, 0.603404026, -0.0295271487, 46.6673779, 0.592783835,
|
||||
-0.77156583, 0.230856632, 66.2186159, 0.116517574, -0.201470143,
|
||||
-0.9725393, -4.28382528, 0, 0, 0, 1;
|
||||
|
||||
m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799,
|
||||
-0.29277519, 0.947083213, 0.131587097, 65.843136,
|
||||
-0.0206094928, 0.131334858, -0.991123524, -4.3525033,
|
||||
0, 0, 0, 1;
|
||||
m2 << -0.955959025, -0.29288915, -0.0189328569, 45.7169799, -0.29277519,
|
||||
0.947083213, 0.131587097, 65.843136, -0.0206094928, 0.131334858,
|
||||
-0.991123524, -4.3525033, 0, 0, 0, 1;
|
||||
|
||||
typedef CameraSet<StereoCamera> Cameras;
|
||||
Cameras cameras;
|
||||
|
|
@ -358,18 +365,19 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
measurements += StereoPoint2(226.936, 175.212, 424.469);
|
||||
measurements += StereoPoint2(339.571, 285.547, 669.973);
|
||||
|
||||
Point3 initial = Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191
|
||||
Point3 initial =
|
||||
Point3(46.0536958, 66.4621179, -6.56285929); // error: 96.5715555191
|
||||
|
||||
Point3 actual = triangulateNonlinear(cameras, measurements, initial);
|
||||
Point3 actual = triangulateNonlinear<StereoCamera>(cameras, measurements, initial);
|
||||
|
||||
Point3 expected(46.0484569, 66.4710686, -6.55046613); // error: 0.763510644187
|
||||
Point3 expected(46.0484569, 66.4710686,
|
||||
-6.55046613); // error: 0.763510644187
|
||||
|
||||
EXPECT(assert_equal(expected, actual, 1e-4));
|
||||
|
||||
|
||||
// regular stereo factor comparison - expect very similar result as above
|
||||
{
|
||||
typedef GenericStereoFactor<Pose3,Point3> StereoFactor;
|
||||
typedef GenericStereoFactor<Pose3, Point3> StereoFactor;
|
||||
|
||||
Values values;
|
||||
values.insert(Symbol('x', 1), Pose3(m1));
|
||||
|
|
@ -378,17 +386,19 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
|
||||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
graph.emplace_shared<StereoFactor>(measurements[0], unit, Symbol('x',1), Symbol('l',1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[1], unit, Symbol('x',2), Symbol('l',1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[0], unit, Symbol('x', 1),
|
||||
Symbol('l', 1), stereoK);
|
||||
graph.emplace_shared<StereoFactor>(measurements[1], unit, Symbol('x', 2),
|
||||
Symbol('l', 1), stereoK);
|
||||
|
||||
const SharedDiagonal posePrior = noiseModel::Isotropic::Sigma(6, 1e-9);
|
||||
graph.addPrior(Symbol('x',1), Pose3(m1), posePrior);
|
||||
graph.addPrior(Symbol('x',2), Pose3(m2), posePrior);
|
||||
graph.addPrior(Symbol('x', 1), Pose3(m1), posePrior);
|
||||
graph.addPrior(Symbol('x', 2), Pose3(m2), posePrior);
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
|
||||
// use Triangulation Factor directly - expect same result as above
|
||||
|
|
@ -399,13 +409,15 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera> >(cameras[0], measurements[0], unit, Symbol('l',1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera> >(cameras[1], measurements[1], unit, Symbol('l',1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera>>(
|
||||
cameras[0], measurements[0], unit, Symbol('l', 1));
|
||||
graph.emplace_shared<TriangulationFactor<StereoCamera>>(
|
||||
cameras[1], measurements[1], unit, Symbol('l', 1));
|
||||
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
|
||||
// use ExpressionFactor - expect same result as above
|
||||
|
|
@ -416,11 +428,13 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
NonlinearFactorGraph graph;
|
||||
static SharedNoiseModel unit(noiseModel::Unit::Create(3));
|
||||
|
||||
Expression<Point3> point_(Symbol('l',1));
|
||||
Expression<Point3> point_(Symbol('l', 1));
|
||||
Expression<StereoCamera> camera0_(cameras[0]);
|
||||
Expression<StereoCamera> camera1_(cameras[1]);
|
||||
Expression<StereoPoint2> project0_(camera0_, &StereoCamera::project2, point_);
|
||||
Expression<StereoPoint2> project1_(camera1_, &StereoCamera::project2, point_);
|
||||
Expression<StereoPoint2> project0_(camera0_, &StereoCamera::project2,
|
||||
point_);
|
||||
Expression<StereoPoint2> project1_(camera1_, &StereoCamera::project2,
|
||||
point_);
|
||||
|
||||
graph.addExpressionFactor(unit, measurements[0], project0_);
|
||||
graph.addExpressionFactor(unit, measurements[1], project1_);
|
||||
|
|
@ -428,10 +442,172 @@ TEST( triangulation, StereotriangulateNonlinear ) {
|
|||
LevenbergMarquardtOptimizer optimizer(graph, values);
|
||||
Values result = optimizer.optimize();
|
||||
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l',1)), 1e-4));
|
||||
EXPECT(assert_equal(expected, result.at<Point3>(Symbol('l', 1)), 1e-4));
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
// Simple test with a well-behaved two camera situation
|
||||
TEST(triangulation, twoPoses_sphericalCamera) {
|
||||
vector<Pose3> poses;
|
||||
std::vector<Unit3> measurements;
|
||||
|
||||
// Project landmark into two cameras and triangulate
|
||||
SphericalCamera cam1(pose1);
|
||||
SphericalCamera cam2(pose2);
|
||||
Unit3 u1 = cam1.project(landmark);
|
||||
Unit3 u2 = cam2.project(landmark);
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
cameras.push_back(cam1);
|
||||
cameras.push_back(cam2);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
// 1. Test linear triangulation via DLT
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
|
||||
// 2. Test nonlinear triangulation
|
||||
point = triangulateNonlinear<SphericalCamera>(cameras, measurements, point);
|
||||
EXPECT(assert_equal(landmark, point, 1e-7));
|
||||
|
||||
// 3. Test simple DLT, now within triangulatePoint3
|
||||
bool optimize = false;
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual1, 1e-7));
|
||||
|
||||
// 4. test with optimization on, same answer
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual2 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmark, *actual2, 1e-7));
|
||||
|
||||
// 5. Add some noise and try again: result should be ~ (4.995,
|
||||
// 0.499167, 1.19814)
|
||||
measurements.at(0) =
|
||||
u1.retract(Vector2(0.01, 0.05)); // note: perturbation smaller for Unit3
|
||||
measurements.at(1) = u2.retract(Vector2(-0.02, 0.03));
|
||||
optimize = false;
|
||||
boost::optional<Point3> actual3 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(Point3(5.9432, 0.654319, 1.48192), *actual3, 1e-3));
|
||||
|
||||
// 6. Now with optimization on
|
||||
optimize = true;
|
||||
boost::optional<Point3> actual4 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(Point3(5.9432, 0.654334, 1.48192), *actual4, 1e-3));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, twoPoses_sphericalCamera_extremeFOV) {
|
||||
vector<Pose3> poses;
|
||||
std::vector<Unit3> measurements;
|
||||
|
||||
// Project landmark into two cameras and triangulate
|
||||
Pose3 poseA = Pose3(
|
||||
Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame
|
||||
Pose3 poseB = Pose3(Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(2.0, 0.0, 0.0)); // 2m in front of poseA
|
||||
Point3 landmarkL(
|
||||
1.0, -1.0,
|
||||
0.0); // 1m to the right of both cameras, in front of poseA, behind poseB
|
||||
SphericalCamera cam1(poseA);
|
||||
SphericalCamera cam2(poseB);
|
||||
Unit3 u1 = cam1.project(landmarkL);
|
||||
Unit3 u2 = cam2.project(landmarkL);
|
||||
|
||||
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, 1.0)), u1,
|
||||
1e-7)); // in front and to the right of PoseA
|
||||
EXPECT(assert_equal(Unit3(Point3(1.0, 0.0, -1.0)), u2,
|
||||
1e-7)); // behind and to the right of PoseB
|
||||
|
||||
poses += pose1, pose2;
|
||||
measurements += u1, u2;
|
||||
|
||||
CameraSet<SphericalCamera> cameras;
|
||||
cameras.push_back(cam1);
|
||||
cameras.push_back(cam2);
|
||||
|
||||
double rank_tol = 1e-9;
|
||||
|
||||
{
|
||||
// 1. Test simple DLT, when 1 point is behind spherical camera
|
||||
bool optimize = false;
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(triangulatePoint3<SphericalCamera>(cameras, measurements,
|
||||
rank_tol, optimize),
|
||||
TriangulationCheiralityException);
|
||||
#else // otherwise project should not throw the exception
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmarkL, *actual1, 1e-7));
|
||||
#endif
|
||||
}
|
||||
{
|
||||
// 2. test with optimization on, same answer
|
||||
bool optimize = true;
|
||||
#ifdef GTSAM_THROW_CHEIRALITY_EXCEPTION
|
||||
CHECK_EXCEPTION(triangulatePoint3<SphericalCamera>(cameras, measurements,
|
||||
rank_tol, optimize),
|
||||
TriangulationCheiralityException);
|
||||
#else // otherwise project should not throw the exception
|
||||
boost::optional<Point3> actual1 = //
|
||||
triangulatePoint3<SphericalCamera>(cameras, measurements, rank_tol,
|
||||
optimize);
|
||||
EXPECT(assert_equal(landmarkL, *actual1, 1e-7));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
TEST(triangulation, reprojectionError_cameraComparison) {
|
||||
Pose3 poseA = Pose3(
|
||||
Rot3::Ypr(-M_PI / 2, 0., -M_PI / 2),
|
||||
Point3(0.0, 0.0, 0.0)); // with z pointing along x axis of global frame
|
||||
Point3 landmarkL(5.0, 0.0, 0.0);  // 5m in front of poseA
|
||||
SphericalCamera sphericalCamera(poseA);
|
||||
Unit3 u = sphericalCamera.project(landmarkL);
|
||||
|
||||
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(60, 640, 480));
|
||||
PinholePose<Cal3_S2> pinholeCamera(poseA, sharedK);
|
||||
Vector2 px = pinholeCamera.project(landmarkL);
|
||||
|
||||
// add perturbation and compare error in both cameras
|
||||
Vector2 px_noise(1.0, 2.0); // px perturbation vertically and horizontally
|
||||
Vector2 measured_px = px + px_noise;
|
||||
Vector2 measured_px_calibrated = sharedK->calibrate(measured_px);
|
||||
Unit3 measured_u =
|
||||
Unit3(measured_px_calibrated[0], measured_px_calibrated[1], 1.0);
|
||||
Unit3 expected_measured_u =
|
||||
Unit3(px_noise[0] / sharedK->fx(), px_noise[1] / sharedK->fy(), 1.0);
|
||||
EXPECT(assert_equal(expected_measured_u, measured_u, 1e-7));
|
||||
|
||||
Vector2 actualErrorPinhole =
|
||||
pinholeCamera.reprojectionError(landmarkL, measured_px);
|
||||
Vector2 expectedErrorPinhole = Vector2(-px_noise[0], -px_noise[1]);
|
||||
EXPECT(assert_equal(expectedErrorPinhole, actualErrorPinhole,
|
||||
1e-7)); //- sign due to definition of error
|
||||
|
||||
Vector2 actualErrorSpherical =
|
||||
sphericalCamera.reprojectionError(landmarkL, measured_u);
|
||||
// expectedError: not easy to calculate, since it involves the unit3 basis
|
||||
Vector2 expectedErrorSpherical(-0.00360842, 0.00180419);
|
||||
EXPECT(assert_equal(expectedErrorSpherical, actualErrorSpherical, 1e-7));
|
||||
}
|
||||
|
||||
//******************************************************************************
|
||||
int main() {
|
||||
TestResult tr;
|
||||
@ -53,15 +53,57 @@ Vector4 triangulateHomogeneousDLT(
|
|||
return v;
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
Vector4 triangulateHomogeneousDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// number of cameras
|
||||
size_t m = projection_matrices.size();
|
||||
|
||||
// Allocate DLT matrix
|
||||
Matrix A = Matrix::Zero(m * 2, 4);
|
||||
|
||||
for (size_t i = 0; i < m; i++) {
|
||||
size_t row = i * 2;
|
||||
const Matrix34& projection = projection_matrices.at(i);
|
||||
const Point3& p = measurements.at(i).point3(); // to get access to x,y,z of the bearing vector
|
||||
|
||||
// build system of equations
|
||||
A.row(row) = p.x() * projection.row(2) - p.z() * projection.row(0);
|
||||
A.row(row + 1) = p.y() * projection.row(2) - p.z() * projection.row(1);
|
||||
}
|
||||
int rank;
|
||||
double error;
|
||||
Vector v;
|
||||
boost::tie(rank, error, v) = DLT(A, rank_tol);
|
||||
|
||||
if (rank < 3)
|
||||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
return v;
|
||||
}
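Each bearing contributes the two rows assembled in the loop above: writing $p = (p_x, p_y, p_z)$ for the measured bearing and $P_1, P_2, P_3$ for the rows of its projection matrix, the constraints on the homogeneous point $X$ are
$$p_x\,(P_3 X) - p_z\,(P_1 X) = 0, \qquad p_y\,(P_3 X) - p_z\,(P_2 X) = 0,$$
which state that $PX$ is parallel to the bearing and reduce to the familiar pinhole DLT rows when $p_z = 1$.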
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol) {
|
||||
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements, rank_tol);
|
||||
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements, double rank_tol) {
|
||||
|
||||
// contrary to previous triangulateDLT, this is now taking Unit3 inputs
|
||||
Vector4 v = triangulateHomogeneousDLT(projection_matrices, measurements,
|
||||
rank_tol);
|
||||
// Create 3D point from homogeneous coordinates
|
||||
return Point3(v.head<3>() / v[3]);
|
||||
}
|
||||
|
||||
///
|
||||
/**
|
||||
* Optimize for triangulation
|
||||
|
|
@ -71,7 +113,7 @@ Point3 triangulateDLT(const std::vector<Matrix34, Eigen::aligned_allocator<Matri
|
|||
* @return refined Point3
|
||||
*/
|
||||
Point3 optimize(const NonlinearFactorGraph& graph, const Values& values,
|
||||
Key landmarkKey) {
|
||||
Key landmarkKey) {
|
||||
// Maybe we should consider Gauss-Newton?
|
||||
LevenbergMarquardtParams params;
|
||||
params.verbosityLM = LevenbergMarquardtParams::TRYLAMBDA;
|
||||
@ -24,6 +24,7 @@
|
|||
#include <gtsam/geometry/Cal3_S2.h>
|
||||
#include <gtsam/geometry/CameraSet.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/SphericalCamera.h>
|
||||
#include <gtsam/geometry/Pose2.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
|
|
@ -59,6 +60,18 @@ GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
|
|||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const Point2Vector& measurements, double rank_tol = 1e-9);
|
||||
|
||||
/**
* Same math as Hartley and Zisserman, 2nd Ed., page 312, but with unit-norm bearing vectors
* (unlike pinhole projection, the z entry is not assumed to be 1 as in Hartley and Zisserman)
* @param projection_matrices Projection matrices (K*P^-1)
* @param measurements Unit3 bearing measurements
* @param rank_tol SVD rank tolerance
* @return Triangulated point, in homogeneous coordinates
*/
GTSAM_EXPORT Vector4 triangulateHomogeneousDLT(
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
const std::vector<Unit3>& measurements, double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* DLT triangulation: See Hartley and Zisserman, 2nd Ed., page 312
|
||||
* @param projection_matrices Projection matrices (K*P^-1)
|
||||
|
|
@ -71,6 +84,14 @@ GTSAM_EXPORT Point3 triangulateDLT(
|
|||
const Point2Vector& measurements,
|
||||
double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* overload of previous function to work with Unit3 (projected to canonical camera)
|
||||
*/
|
||||
GTSAM_EXPORT Point3 triangulateDLT(
|
||||
const std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>& projection_matrices,
|
||||
const std::vector<Unit3>& measurements,
|
||||
double rank_tol = 1e-9);
|
||||
|
||||
/**
|
||||
* Create a factor graph with projection factors from poses and one calibration
|
||||
* @param poses Camera poses
|
||||
|
|
@ -180,26 +201,27 @@ Point3 triangulateNonlinear(
|
|||
return optimize(graph, values, Symbol('p', 0));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a 3*4 camera projection matrix from calibration and pose.
|
||||
* Functor for partial application on calibration
|
||||
* @param pose The camera pose
|
||||
* @param cal The calibration
|
||||
* @return Returns a Matrix34
|
||||
*/
|
||||
template<class CAMERA>
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>>
|
||||
projectionMatricesFromCameras(const CameraSet<CAMERA> &cameras) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for (const CAMERA &camera: cameras) {
|
||||
projection_matrices.push_back(camera.cameraProjectionMatrix());
|
||||
}
|
||||
return projection_matrices;
|
||||
}
|
||||
|
||||
// overload, assuming pinholePose
|
||||
template<class CALIBRATION>
|
||||
struct CameraProjectionMatrix {
|
||||
CameraProjectionMatrix(const CALIBRATION& calibration) :
|
||||
K_(calibration.K()) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projectionMatricesFromPoses(
|
||||
const std::vector<Pose3> &poses, boost::shared_ptr<CALIBRATION> sharedCal) {
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for (size_t i = 0; i < poses.size(); i++) {
|
||||
PinholePose<CALIBRATION> camera(poses.at(i), sharedCal);
|
||||
projection_matrices.push_back(camera.cameraProjectionMatrix());
|
||||
}
|
||||
Matrix34 operator()(const Pose3& pose) const {
|
||||
return K_ * (pose.inverse().matrix()).block<3, 4>(0, 0);
|
||||
}
|
||||
private:
|
||||
const Matrix3 K_;
|
||||
public:
|
||||
GTSAM_MAKE_ALIGNED_OPERATOR_NEW
|
||||
};
|
||||
return projection_matrices;
|
||||
}
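These two helpers replace the CameraProjectionMatrix functor; the callers later in this patch use them exactly as sketched here:

// Build the 3*4 projection matrices, then triangulate linearly with DLT.
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);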
|
||||
|
||||
/**
|
||||
* Function to triangulate 3D landmark point from an arbitrary number
|
||||
|
|
@ -224,10 +246,7 @@ Point3 triangulatePoint3(const std::vector<Pose3>& poses,
|
|||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
CameraProjectionMatrix<CALIBRATION> createP(*sharedCal); // partially apply
|
||||
for(const Pose3& pose: poses)
|
||||
projection_matrices.push_back(createP(pose));
|
||||
auto projection_matrices = projectionMatricesFromPoses(poses, sharedCal);
|
||||
|
||||
// Triangulate linearly
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
|
|
@ -274,11 +293,7 @@ Point3 triangulatePoint3(
|
|||
throw(TriangulationUnderconstrainedException());
|
||||
|
||||
// construct projection matrices from poses & calibration
|
||||
std::vector<Matrix34, Eigen::aligned_allocator<Matrix34>> projection_matrices;
|
||||
for(const CAMERA& camera: cameras)
|
||||
projection_matrices.push_back(
|
||||
CameraProjectionMatrix<typename CAMERA::CalibrationType>(camera.calibration())(
|
||||
camera.pose()));
|
||||
auto projection_matrices = projectionMatricesFromCameras(cameras);
|
||||
Point3 point = triangulateDLT(projection_matrices, measurements, rank_tol);
|
||||
|
||||
// Then refine using non-linear optimization
|
||||
|
|
@ -474,8 +489,8 @@ TriangulationResult triangulateSafe(const CameraSet<CAMERA>& cameras,
|
|||
#endif
|
||||
// Check reprojection error
|
||||
if (params.dynamicOutlierRejectionThreshold > 0) {
|
||||
const Point2& zi = measured.at(i);
|
||||
Point2 reprojectionError(camera.project(point) - zi);
|
||||
const typename CAMERA::Measurement& zi = measured.at(i);
|
||||
Point2 reprojectionError = camera.reprojectionError(point, zi);
|
||||
maxReprojError = std::max(maxReprojError, reprojectionError.norm());
|
||||
}
|
||||
i += 1;
|
||||
|
|
@ -503,6 +518,6 @@ using CameraSetCal3Bundler = CameraSet<PinholeCamera<Cal3Bundler>>;
|
|||
using CameraSetCal3_S2 = CameraSet<PinholeCamera<Cal3_S2>>;
|
||||
using CameraSetCal3Fisheye = CameraSet<PinholeCamera<Cal3Fisheye>>;
|
||||
using CameraSetCal3Unified = CameraSet<PinholeCamera<Cal3Unified>>;
|
||||
|
||||
using CameraSetSpherical = CameraSet<SphericalCamera>;
|
||||
} // \namespace gtsam
|
||||
@ -19,6 +19,8 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <gtsam/inference/Key.h>
|
||||
#include <gtsam/base/FastList.h>
|
||||
#include <gtsam/base/ConcurrentMap.h>
|
||||
|
|
|
|||
|
|
@ -110,7 +110,7 @@ class ClusterTree {
|
|||
typedef sharedCluster sharedNode;
|
||||
|
||||
/** concept check */
|
||||
GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
|
||||
GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)
|
||||
|
||||
protected:
|
||||
FastVector<sharedNode> roots_;
|
||||
|
|
|
|||
|
|
@ -36,17 +36,17 @@ namespace gtsam {
|
|||
// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
|
||||
// before creating ordering.
|
||||
VariableIndex computedVariableIndex(asDerived());
|
||||
return eliminateSequential(function, computedVariableIndex, orderingType);
|
||||
return eliminateSequential(orderingType, function, computedVariableIndex);
|
||||
}
|
||||
else {
|
||||
// Compute an ordering and call this function again. We are guaranteed to have a
|
||||
// VariableIndex already here because we computed one if needed in the previous 'if' block.
|
||||
if (orderingType == Ordering::METIS) {
|
||||
Ordering computedOrdering = Ordering::Metis(asDerived());
|
||||
return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
|
||||
return eliminateSequential(computedOrdering, function, variableIndex);
|
||||
} else {
|
||||
Ordering computedOrdering = Ordering::Colamd(*variableIndex);
|
||||
return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
|
||||
return eliminateSequential(computedOrdering, function, variableIndex);
|
||||
}
|
||||
}
|
||||
}
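After this reordering the ordering type comes first; a typical call against, say, a GaussianFactorGraph then reads as below (a sketch that assumes the defaulted eliminate-function and variable-index parameters):

// Sketch: eliminate with the new argument order, COLAMD ordering.
GaussianFactorGraph graph;
// ... add factors ...
auto bayesNet = graph.eliminateSequential(Ordering::COLAMD);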
|
||||
|
|
@ -78,29 +78,31 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
template<class FACTORGRAPH>
|
||||
boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
|
||||
EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
|
||||
OptionalOrderingType orderingType, const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex) const
|
||||
{
|
||||
if(!variableIndex) {
|
||||
// If no VariableIndex provided, compute one and call this function again IMPORTANT: we check
|
||||
// for no variable index first so that it's always computed if we need to call COLAMD because
|
||||
// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
|
||||
// before creating ordering.
|
||||
template <class FACTORGRAPH>
|
||||
boost::shared_ptr<
|
||||
typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
|
||||
EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
|
||||
OptionalOrderingType orderingType, const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex) const {
|
||||
if (!variableIndex) {
|
||||
// If no VariableIndex provided, compute one and call this function again
|
||||
// IMPORTANT: we check for no variable index first so that it's always
|
||||
// computed if we need to call COLAMD because no Ordering is provided.
|
||||
// When removing optional from VariableIndex, create VariableIndex before
|
||||
// creating ordering.
|
||||
VariableIndex computedVariableIndex(asDerived());
|
||||
return eliminateMultifrontal(function, computedVariableIndex, orderingType);
|
||||
}
|
||||
else {
|
||||
// Compute an ordering and call this function again. We are guaranteed to have a
|
||||
// VariableIndex already here because we computed one if needed in the previous 'if' block.
|
||||
return eliminateMultifrontal(orderingType, function,
|
||||
computedVariableIndex);
|
||||
} else {
|
||||
// Compute an ordering and call this function again. We are guaranteed to
|
||||
// have a VariableIndex already here because we computed one if needed in
|
||||
// the previous 'if' block.
|
||||
if (orderingType == Ordering::METIS) {
|
||||
Ordering computedOrdering = Ordering::Metis(asDerived());
|
||||
return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
|
||||
return eliminateMultifrontal(computedOrdering, function, variableIndex);
|
||||
} else {
|
||||
Ordering computedOrdering = Ordering::Colamd(*variableIndex);
|
||||
return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
|
||||
return eliminateMultifrontal(computedOrdering, function, variableIndex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -273,7 +275,7 @@ namespace gtsam {
|
|||
else
|
||||
{
|
||||
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
|
||||
return factorGraph->eliminateSequential(function);
|
||||
return factorGraph->eliminateSequential(Ordering::COLAMD, function);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -340,7 +342,7 @@ namespace gtsam {
|
|||
else
|
||||
{
|
||||
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
|
||||
return factorGraph->eliminateMultifrontal(function);
|
||||
return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -288,8 +288,9 @@ namespace gtsam {
|
|||
FactorGraphType& asDerived() { return static_cast<FactorGraphType&>(*this); }
|
||||
|
||||
public:
|
||||
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
|
||||
/** \deprecated ordering and orderingType shouldn't both be specified */
|
||||
boost::shared_ptr<BayesNetType> eliminateSequential(
|
||||
boost::shared_ptr<BayesNetType> GTSAM_DEPRECATED eliminateSequential(
|
||||
const Ordering& ordering,
|
||||
const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex,
|
||||
|
|
@ -298,7 +299,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** \deprecated orderingType specified first for consistency */
|
||||
boost::shared_ptr<BayesNetType> eliminateSequential(
|
||||
boost::shared_ptr<BayesNetType> GTSAM_DEPRECATED eliminateSequential(
|
||||
const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex = boost::none,
|
||||
OptionalOrderingType orderingType = boost::none) const {
|
||||
|
|
@ -306,7 +307,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** \deprecated ordering and orderingType shouldn't both be specified */
|
||||
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
|
||||
boost::shared_ptr<BayesTreeType> GTSAM_DEPRECATED eliminateMultifrontal(
|
||||
const Ordering& ordering,
|
||||
const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex,
|
||||
|
|
@ -315,7 +316,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** \deprecated orderingType specified first for consistency */
|
||||
boost::shared_ptr<BayesTreeType> eliminateMultifrontal(
|
||||
boost::shared_ptr<BayesTreeType> GTSAM_DEPRECATED eliminateMultifrontal(
|
||||
const Eliminate& function,
|
||||
OptionalVariableIndex variableIndex = boost::none,
|
||||
OptionalOrderingType orderingType = boost::none) const {
|
||||
|
|
@ -323,7 +324,7 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** \deprecated */
|
||||
boost::shared_ptr<BayesNetType> marginalMultifrontalBayesNet(
|
||||
boost::shared_ptr<BayesNetType> GTSAM_DEPRECATED marginalMultifrontalBayesNet(
|
||||
boost::variant<const Ordering&, const KeyVector&> variables,
|
||||
boost::none_t,
|
||||
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
|
||||
|
|
@ -332,13 +333,14 @@ namespace gtsam {
|
|||
}
|
||||
|
||||
/** \deprecated */
|
||||
boost::shared_ptr<BayesTreeType> marginalMultifrontalBayesTree(
|
||||
boost::shared_ptr<BayesTreeType> GTSAM_DEPRECATED marginalMultifrontalBayesTree(
|
||||
boost::variant<const Ordering&, const KeyVector&> variables,
|
||||
boost::none_t,
|
||||
const Eliminate& function = EliminationTraitsType::DefaultEliminate,
|
||||
OptionalVariableIndex variableIndex = boost::none) const {
|
||||
return marginalMultifrontalBayesTree(variables, function, variableIndex);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -81,7 +81,7 @@ namespace gtsam {
|
|||
|
||||
protected:
|
||||
/** concept check */
|
||||
GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
|
||||
GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)
|
||||
|
||||
FastVector<sharedNode> roots_;
|
||||
FastVector<sharedFactor> remainingFactors_;
|
||||
|
|
|
|||
|
|
@ -22,6 +22,7 @@
|
|||
#pragma once
|
||||
|
||||
#include <boost/serialization/nvp.hpp>
|
||||
#include <boost/shared_ptr.hpp>
|
||||
|
||||
#include <gtsam/base/types.h>
|
||||
#include <gtsam/base/FastVector.h>
|
||||
|
|
|
|||
|
|
@@ -111,10 +111,10 @@ double dot(const Errors& a, const Errors& b) {

/* ************************************************************************* */
-template<>
-void axpy<Errors,Errors>(double alpha, const Errors& x, Errors& y) {
+void axpy(double alpha, const Errors& x, Errors& y) {
  Errors::const_iterator it = x.begin();
  for(Vector& yi: y)
-    axpy(alpha,*(it++),yi);
+    yi += alpha * (*(it++));
}

/* ************************************************************************* */
@@ -66,7 +66,7 @@ namespace gtsam {
   * BLAS level 2 style
   */
-  template <>
-  GTSAM_EXPORT void axpy<Errors,Errors>(double alpha, const Errors& x, Errors& y);
+  GTSAM_EXPORT void axpy(double alpha, const Errors& x, Errors& y);

  /** print with optional string */
  GTSAM_EXPORT void print(const Errors& a, const std::string& s = "Error");
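The template specialization was replaced by a plain overload, so callers are unaffected. A small sketch of the intended behaviour, assuming (as the loop above shows) that Errors behaves as a list of Eigen vectors:

    #include <gtsam/linear/Errors.h>

    void scaleAndAdd(const gtsam::Errors& x, gtsam::Errors& y) {
      gtsam::axpy(2.0, x, y);  // elementwise y_i += 2.0 * x_i, as in the new loop body
    }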
@@ -19,7 +19,6 @@
 */

 #include <gtsam/linear/GaussianFactorGraph.h>
-#include <gtsam/linear/VectorValues.h>
 #include <gtsam/linear/GaussianBayesTree.h>
 #include <gtsam/linear/GaussianEliminationTree.h>
 #include <gtsam/linear/GaussianJunctionTree.h>

@@ -290,10 +289,11 @@ namespace gtsam {
    return blocks;
  }

-  /* ************************************************************************* */
+  /* ************************************************************************ */
  VectorValues GaussianFactorGraph::optimize(const Eliminate& function) const {
    gttic(GaussianFactorGraph_optimize);
-    return BaseEliminateable::eliminateMultifrontal(function)->optimize();
+    return BaseEliminateable::eliminateMultifrontal(Ordering::COLAMD, function)
+        ->optimize();
  }

  /* ************************************************************************* */

@@ -504,10 +504,32 @@ namespace gtsam {
  }

  /* ************************************************************************* */
-  /** \deprecated */
-  VectorValues GaussianFactorGraph::optimize(boost::none_t,
-      const Eliminate& function) const {
-    return optimize(function);
+  void GaussianFactorGraph::printErrors(
+      const VectorValues& values, const std::string& str,
+      const KeyFormatter& keyFormatter,
+      const std::function<bool(const Factor* /*factor*/,
+                               double /*whitenedError*/, size_t /*index*/)>&
+          printCondition) const {
+    cout << str << "size: " << size() << endl << endl;
+    for (size_t i = 0; i < (*this).size(); i++) {
+      const sharedFactor& factor = (*this)[i];
+      const double errorValue =
+          (factor != nullptr ? (*this)[i]->error(values) : .0);
+      if (!printCondition(factor.get(), errorValue, i))
+        continue;  // User-provided filter did not pass
+
+      stringstream ss;
+      ss << "Factor " << i << ": ";
+      if (factor == nullptr) {
+        cout << "nullptr"
+             << "\n";
+      } else {
+        factor->print(ss.str(), keyFormatter);
+        cout << "error = " << errorValue << "\n";
+      }
+      cout << endl;  // only one "endl" at end might be faster, \n for each
+                     // factor
+    }
  }

 } // namespace gtsam
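The new printErrors takes an optional filter, so callers can dump only the factors they care about. A usage sketch, assuming a graph and a solution built elsewhere:

    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <gtsam/linear/VectorValues.h>

    void printLargeErrors(const gtsam::GaussianFactorGraph& gfg,
                          const gtsam::VectorValues& x) {
      gfg.printErrors(x, "large errors: ", gtsam::DefaultKeyFormatter,
                      [](const gtsam::Factor* /*factor*/, double whitenedError,
                         size_t /*index*/) { return whitenedError > 1.0; });
    }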
@@ -21,12 +21,13 @@

 #pragma once

-#include <gtsam/inference/FactorGraph.h>
 #include <gtsam/inference/EliminateableFactorGraph.h>
+#include <gtsam/inference/FactorGraph.h>
+#include <gtsam/linear/Errors.h>  // Included here instead of fw-declared so we can use Errors::iterator
 #include <gtsam/linear/GaussianFactor.h>
-#include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/HessianFactor.h>
-#include <gtsam/linear/Errors.h> // Included here instead of fw-declared so we can use Errors::iterator
+#include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/VectorValues.h>

 namespace gtsam {

@@ -375,6 +376,14 @@ namespace gtsam {
    /** In-place version e <- A*x that takes an iterator. */
    void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;

+    void printErrors(
+        const VectorValues& x,
+        const std::string& str = "GaussianFactorGraph: ",
+        const KeyFormatter& keyFormatter = DefaultKeyFormatter,
+        const std::function<bool(const Factor* /*factor*/,
+                                 double /*whitenedError*/, size_t /*index*/)>&
+            printCondition =
+                [](const Factor*, double, size_t) { return true; }) const;
    /// @}

  private:

@@ -387,9 +396,14 @@ namespace gtsam {

  public:

-    /** \deprecated */
-    VectorValues optimize(boost::none_t,
-        const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
+  #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
+    /** \deprecated */
+    VectorValues optimize(boost::none_t,
+                          const Eliminate& function =
+                              EliminationTraitsType::DefaultEliminate) const {
+      return optimize(function);
+    }
+  #endif

  };

@@ -177,17 +177,16 @@ namespace gtsam {
      return *sqrt_information_;
    }

-  protected:
-
-    /** protected constructor takes square root information matrix */
-    Gaussian(size_t dim = 1, const boost::optional<Matrix>& sqrt_information = boost::none) :
-      Base(dim), sqrt_information_(sqrt_information) {
-    }
-
  public:

    typedef boost::shared_ptr<Gaussian> shared_ptr;

+    /** constructor takes square root information matrix */
+    Gaussian(size_t dim = 1,
+             const boost::optional<Matrix>& sqrt_information = boost::none)
+        : Base(dim), sqrt_information_(sqrt_information) {}
+
    ~Gaussian() override {}

    /**

@@ -290,13 +289,13 @@ namespace gtsam {
    Vector sigmas_, invsigmas_, precisions_;

  protected:
-    /** protected constructor - no initializations */
-    Diagonal();
-
    /** constructor to allow for disabling initialization of invsigmas */
    Diagonal(const Vector& sigmas);

  public:
+    /** constructor - no initializations, for serialization */
+    Diagonal();
+
    typedef boost::shared_ptr<Diagonal> shared_ptr;

@@ -387,14 +386,6 @@ namespace gtsam {
    // Sigmas are contained in the base class
    Vector mu_; ///< Penalty function weight - needs to be large enough to dominate soft constraints

-    /**
-     * protected constructor takes sigmas.
-     * prevents any inf values
-     * from appearing in invsigmas or precisions.
-     * mu set to large default value (1000.0)
-     */
-    Constrained(const Vector& sigmas = Z_1x1);
-
    /**
     * Constructor that prevents any inf values
     * from appearing in invsigmas or precisions.

@@ -406,6 +397,14 @@ namespace gtsam {

    typedef boost::shared_ptr<Constrained> shared_ptr;

+    /**
+     * protected constructor takes sigmas.
+     * prevents any inf values
+     * from appearing in invsigmas or precisions.
+     * mu set to large default value (1000.0)
+     */
+    Constrained(const Vector& sigmas = Z_1x1);
+
    ~Constrained() override {}

    /// true if a constrained noise mode, saves slow/clumsy dynamic casting

@@ -461,6 +460,11 @@ namespace gtsam {
      return MixedVariances(precisions.array().inverse());
    }

+    /**
+     * The squaredMahalanobisDistance function for a constrained noisemodel,
+     * for non-constrained versions, uses sigmas, otherwise
+     * uses the penalty function with mu
+     */
+    double squaredMahalanobisDistance(const Vector& v) const override;
+
    /** Fully constrained variations */
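The constructor moves above are about serialization; user code normally keeps going through the named factory functions, and the new squaredMahalanobisDistance override is what constrained and robust costs are built on. A sketch, assuming the usual GTSAM factory names Isotropic::Sigma, Diagonal::Sigmas, and Constrained::MixedSigmas:

    #include <gtsam/linear/NoiseModel.h>

    void noiseModelExamples() {
      using namespace gtsam::noiseModel;
      auto iso  = Isotropic::Sigma(3, 0.1);
      auto diag = Diagonal::Sigmas(gtsam::Vector3(0.1, 0.2, 0.3));
      auto hard = Constrained::MixedSigmas(gtsam::Vector3(0.0, 0.1, 0.1));

      // Constrained dimensions fall back to the mu-weighted penalty.
      double d2 = hard->squaredMahalanobisDistance(gtsam::Vector3(0.01, 0.0, 0.0));
      (void)iso; (void)diag; (void)d2;
    }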
@@ -531,11 +535,11 @@ namespace gtsam {
    Isotropic(size_t dim, double sigma) :
      Diagonal(Vector::Constant(dim, sigma)),sigma_(sigma),invsigma_(1.0/sigma) {}

-  public:
-
    /* dummy constructor to allow for serialization */
    Isotropic() : Diagonal(Vector1::Constant(1.0)),sigma_(1.0),invsigma_(1.0) {}

+  public:
+
    ~Isotropic() override {}

    typedef boost::shared_ptr<Isotropic> shared_ptr;

@@ -592,14 +596,13 @@ namespace gtsam {
   * Unit: i.i.d. unit-variance noise on all m dimensions.
   */
  class GTSAM_EXPORT Unit : public Isotropic {
-  protected:
-
-    Unit(size_t dim=1): Isotropic(dim,1.0) {}
-
  public:

    typedef boost::shared_ptr<Unit> shared_ptr;

+    /** constructor for serialization */
+    Unit(size_t dim=1): Isotropic(dim,1.0) {}
+
    ~Unit() override {}

    /**

@@ -682,19 +685,19 @@ namespace gtsam {
    /// Return the contained noise model
    const NoiseModel::shared_ptr& noise() const { return noise_; }

-    // TODO: functions below are dummy but necessary for the noiseModel::Base
+    // Functions below are dummy but necessary for the noiseModel::Base
    inline Vector whiten(const Vector& v) const override
    { Vector r = v; this->WhitenSystem(r); return r; }
    inline Matrix Whiten(const Matrix& A) const override
    { Vector b; Matrix B=A; this->WhitenSystem(B,b); return B; }
    inline Vector unwhiten(const Vector& /*v*/) const override
    { throw std::invalid_argument("unwhiten is not currently supported for robust noise models."); }

    /// Compute loss from the m-estimator using the Mahalanobis distance.
    double loss(const double squared_distance) const override {
      return robust_->loss(std::sqrt(squared_distance));
    }

-    // TODO: these are really robust iterated re-weighting support functions
+    // These are really robust iterated re-weighting support functions
    virtual void WhitenSystem(Vector& b) const;
    void WhitenSystem(std::vector<Matrix>& A, Vector& b) const override;
    void WhitenSystem(Matrix& A, Vector& b) const override;

@@ -705,7 +708,6 @@ namespace gtsam {
      return noise_->unweightedWhiten(v);
    }
    double weight(const Vector& v) const override {
-      // Todo(mikebosse): make the robust weight function input a vector.
      return robust_->weight(v.norm());
    }

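For context, the Robust wrapper these hunks touch is normally built as below; Huber with k = 1.345 is only an illustrative choice, not something this diff prescribes. loss() applies the m-estimator to the Mahalanobis distance, while weight() drives the iteratively reweighted least-squares updates.

    #include <gtsam/linear/NoiseModel.h>

    gtsam::SharedNoiseModel makeRobustModel() {
      using namespace gtsam::noiseModel;
      auto base = Isotropic::Sigma(3, 0.1);
      return Robust::Create(mEstimator::Huber::Create(1.345), base);
    }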
@@ -173,7 +173,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd
  Errors::const_iterator it = e.begin();
  for(auto& key_value: y) {
    const Vector& ei = *it;
-    axpy(alpha, ei, key_value.second);
+    key_value.second += alpha * ei;
    ++it;
  }
  transposeMultiplyAdd2(alpha, it, e.end(), y);

@@ -191,7 +191,7 @@ void SubgraphPreconditioner::transposeMultiplyAdd2 (double alpha,

  VectorValues x = VectorValues::Zero(y); // x = 0
  Ab2_->transposeMultiplyAdd(1.0,e2,x);   // x += A2'*e2
-  axpy(alpha, Rc1_->backSubstituteTranspose(x), y); // y += alpha*inv(R1')*x
+  y += alpha * Rc1_->backSubstituteTranspose(x);    // y += alpha*inv(R1')*x
}

/* ************************************************************************* */

@@ -72,7 +72,7 @@ namespace gtsam {
    double takeOptimalStep(V& x) {
      // TODO: can we use gamma instead of dot(d,g) ????? Answer not trivial
      double alpha = -dot(d, g) / dot(Ad, Ad); // calculate optimal step-size
-      axpy(alpha, d, x); // // do step in new search direction, x += alpha*d
+      x += alpha * d;  // do step in new search direction, x += alpha*d
      return alpha;
    }

@@ -106,7 +106,7 @@ namespace gtsam {
      double beta = new_gamma / gamma;
      // d = g + d*beta;
      d *= beta;
-      axpy(1.0, g, d);
+      d += 1.0 * g;
    }

    gamma = new_gamma;
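The axpy-to-operator rewrites above do not change the conjugate-gradient bookkeeping, which (as the inline comments state) is

    \[
      \alpha = -\frac{d^\top g}{(Ad)^\top (Ad)}, \qquad
      x \leftarrow x + \alpha d, \qquad
      \beta = \frac{\gamma_{\text{new}}}{\gamma}, \qquad
      d \leftarrow g + \beta d .
    \]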
@@ -34,6 +34,9 @@ virtual class Gaussian : gtsam::noiseModel::Base {

  // enabling serialization functionality
  void serializable() const;
+
+  // enable pickling in python
+  void pickle() const;
};

virtual class Diagonal : gtsam::noiseModel::Gaussian {

@@ -49,6 +52,9 @@ virtual class Diagonal : gtsam::noiseModel::Gaussian {

  // enabling serialization functionality
  void serializable() const;
+
+  // enable pickling in python
+  void pickle() const;
};

virtual class Constrained : gtsam::noiseModel::Diagonal {

@@ -66,6 +72,9 @@ virtual class Constrained : gtsam::noiseModel::Diagonal {

  // enabling serialization functionality
  void serializable() const;
+
+  // enable pickling in python
+  void pickle() const;
};

virtual class Isotropic : gtsam::noiseModel::Diagonal {

@@ -78,6 +87,9 @@ virtual class Isotropic : gtsam::noiseModel::Diagonal {

  // enabling serialization functionality
  void serializable() const;
+
+  // enable pickling in python
+  void pickle() const;
};

virtual class Unit : gtsam::noiseModel::Isotropic {

@@ -85,6 +97,9 @@ virtual class Unit : gtsam::noiseModel::Isotropic {

  // enabling serialization functionality
  void serializable() const;
+
+  // enable pickling in python
+  void pickle() const;
};

namespace mEstimator {
@@ -221,6 +236,7 @@ class VectorValues {
  //Constructors
  VectorValues();
  VectorValues(const gtsam::VectorValues& other);
+  VectorValues(const gtsam::VectorValues& first, const gtsam::VectorValues& second);

  //Named Constructors
  static gtsam::VectorValues Zero(const gtsam::VectorValues& model);

@@ -301,6 +317,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
  void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
             gtsam::DefaultKeyFormatter) const;
+  void printKeys(string s) const;
  gtsam::KeyVector& keys() const;
  bool equals(const gtsam::GaussianFactor& lf, double tol) const;
  size_t size() const;
  Vector unweighted_error(const gtsam::VectorValues& c) const;

@@ -400,6 +417,7 @@ class GaussianFactorGraph {
  // error and probability
  double error(const gtsam::VectorValues& c) const;
  double probPrime(const gtsam::VectorValues& c) const;
+  void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;

  gtsam::GaussianFactorGraph clone() const;
  gtsam::GaussianFactorGraph negate() const;

@@ -512,9 +530,9 @@ virtual class GaussianBayesNet {
  size_t size() const;

  // FactorGraph derived interface
-  // size_t size() const;
  gtsam::GaussianConditional* at(size_t idx) const;
  gtsam::KeySet keys() const;
+  gtsam::KeyVector keyVector() const;
  bool exists(size_t idx) const;

  void saveGraph(const string& s) const;
@@ -32,7 +32,7 @@ TEST( Errors, arithmetic )
  e += Vector2(1.0,2.0), Vector3(3.0,4.0,5.0);
  DOUBLES_EQUAL(1+4+9+16+25,dot(e,e),1e-9);

-  axpy(2.0,e,e);
+  axpy(2.0, e, e);
  Errors expected;
  expected += Vector2(3.0,6.0), Vector3(9.0,12.0,15.0);
  CHECK(assert_equal(expected,e));
@@ -662,25 +662,14 @@ TEST(NoiseModel, robustNoiseL2WithDeadZone)
{
  double dead_zone_size = 1.0;
  SharedNoiseModel robust = noiseModel::Robust::Create(
-    noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size),
-    Unit::Create(3));
-
-  /*
-   * TODO(mike): There is currently a bug in GTSAM, where none of the mEstimator classes
-   * implement a loss function, and GTSAM calls the weight function to evaluate the
-   * total penalty, rather than calling the loss function. The weight function should be
-   * used during iteratively reweighted least squares optimization, but should not be used to
-   * evaluate the total penalty. The long-term solution is for all mEstimators to implement
-   * both a weight and a loss function, and for GTSAM to call the loss function when
-   * evaluating the total penalty. This bug causes the test below to fail, so I'm leaving it
-   * commented out until the underlying bug in GTSAM is fixed.
-   *
-   * for (int i = 0; i < 5; i++) {
-   *   Vector3 error = Vector3(i, 0, 0);
-   *   DOUBLES_EQUAL(0.5*max(0,i-1)*max(0,i-1), robust->distance(error), 1e-8);
-   * }
-   */
+      noiseModel::mEstimator::L2WithDeadZone::Create(dead_zone_size),
+      Unit::Create(3));
+
+  for (int i = 0; i < 5; i++) {
+    Vector3 error = Vector3(i, 0, 0);
+    DOUBLES_EQUAL(std::fmax(0, i - dead_zone_size) * i,
+                  robust->squaredMahalanobisDistance(error), 1e-8);
+  }
}

TEST(NoiseModel, lossFunctionAtZero)

@@ -707,9 +696,9 @@ TEST(NoiseModel, lossFunctionAtZero)
  auto dcs = mEstimator::DCS::Create(k);
  DOUBLES_EQUAL(dcs->loss(0), 0, 1e-8);
  DOUBLES_EQUAL(dcs->weight(0), 1, 1e-8);
-  // auto lsdz = mEstimator::L2WithDeadZone::Create(k);
-  // DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
-  // DOUBLES_EQUAL(lsdz->weight(0), 1, 1e-8);
+  auto lsdz = mEstimator::L2WithDeadZone::Create(k);
+  DOUBLES_EQUAL(lsdz->loss(0), 0, 1e-8);
+  DOUBLES_EQUAL(lsdz->weight(0), 0, 1e-8);
}

@@ -39,14 +39,14 @@ using namespace gtsam::serializationTestHelpers;
/* ************************************************************************* */
// Export Noisemodels
// See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")

-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

/* ************************************************************************* */
// example noise models
@@ -129,9 +129,9 @@ TEST (Serialization, SharedDiagonal_noiseModels) {

/* Create GUIDs for factors */
/* ************************************************************************* */
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional");
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")

/* ************************************************************************* */
TEST (Serialization, linear_factors) {
@@ -257,5 +257,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) {
/// namespace gtsam

/// Boost serialization export definition for derived class
-BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams)
@@ -353,4 +353,4 @@ struct traits<CombinedImuFactor> : public Testable<CombinedImuFactor> {};
} // namespace gtsam

/// Add Boost serialization export key (declaration) for derived class
-BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams)
@@ -41,6 +41,12 @@ class ConstantBias {
  Vector gyroscope() const;
  Vector correctAccelerometer(Vector measurement) const;
  Vector correctGyroscope(Vector measurement) const;
+
+  // enabling serialization functionality
+  void serialize() const;
+
+  // enable pickling in python
+  void pickle() const;
};

}///\namespace imuBias

@@ -64,6 +70,12 @@ class NavState {

  gtsam::NavState retract(const Vector& x) const;
  Vector localCoordinates(const gtsam::NavState& g) const;
+
+  // enabling serialization functionality
+  void serialize() const;
+
+  // enable pickling in python
+  void pickle() const;
};

#include <gtsam/navigation/PreintegratedRotation.h>

@@ -88,6 +100,8 @@ virtual class PreintegratedRotationParams {
virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
  PreintegrationParams(Vector n_gravity);

+  gtsam::Vector n_gravity;
+
  static gtsam::PreintegrationParams* MakeSharedD(double g);
  static gtsam::PreintegrationParams* MakeSharedU(double g);
  static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81

@@ -104,6 +118,12 @@ virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
  Matrix getAccelerometerCovariance() const;
  Matrix getIntegrationCovariance() const;
  bool getUse2ndOrderCoriolis() const;
+
+  // enabling serialization functionality
+  void serialize() const;
+
+  // enable pickling in python
+  void pickle() const;
};

#include <gtsam/navigation/ImuFactor.h>

@@ -133,6 +153,12 @@ class PreintegratedImuMeasurements {
  Vector biasHatVector() const;
  gtsam::NavState predict(const gtsam::NavState& state_i,
                          const gtsam::imuBias::ConstantBias& bias) const;
+
+  // enabling serialization functionality
+  void serialize() const;
+
+  // enable pickling in python
+  void pickle() const;
};

virtual class ImuFactor: gtsam::NonlinearFactor {
@@ -66,7 +66,7 @@ TEST( Rot3AttitudeFactor, Constructor ) {
/* ************************************************************************* */
// Export Noisemodels
// See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic);
+BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic)

/* ************************************************************************* */
TEST(Rot3AttitudeFactor, Serialization) {
@@ -72,7 +72,7 @@ TEST( GPSFactor, Constructor ) {

  // Calculate numerical derivatives
  Matrix expectedH = numericalDerivative11<Vector,Pose3>(
-      std::bind(&GPSFactor::evaluateError, &factor, _1, boost::none), T);
+      std::bind(&GPSFactor::evaluateError, &factor, std::placeholders::_1, boost::none), T);

  // Use the factor to calculate the derivative
  Matrix actualH;

@@ -101,7 +101,7 @@ TEST( GPSFactor2, Constructor ) {

  // Calculate numerical derivatives
  Matrix expectedH = numericalDerivative11<Vector,NavState>(
-      std::bind(&GPSFactor2::evaluateError, &factor, _1, boost::none), T);
+      std::bind(&GPSFactor2::evaluateError, &factor, std::placeholders::_1, boost::none), T);

  // Use the factor to calculate the derivative
  Matrix actualH;
@@ -31,16 +31,16 @@ using namespace gtsam;
using namespace gtsam::serializationTestHelpers;

BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained,
-                        "gtsam_noiseModel_Constrained");
+                        "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal,
-                        "gtsam_noiseModel_Diagonal");
+                        "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian,
-                        "gtsam_noiseModel_Gaussian");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
+                        "gtsam_noiseModel_Gaussian")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,
-                        "gtsam_noiseModel_Isotropic");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+                        "gtsam_noiseModel_Isotropic")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

template <typename P>
P getPreintegratedMeasurements() {
@@ -64,7 +64,7 @@ TEST( MagFactor, unrotate ) {
  Point3 expected(22735.5, 314.502, 44202.5);
  EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1));
  EXPECT( assert_equal(numericalDerivative11<Point3,Rot2> //
-      (std::bind(&MagFactor::unrotate, _1, nM, none), theta), H, 1e-6));
+      (std::bind(&MagFactor::unrotate, std::placeholders::_1, nM, none), theta), H, 1e-6));
}

// *************************************************************************

@@ -76,35 +76,35 @@ TEST( MagFactor, Factors ) {
  MagFactor f(1, measured, s, dir, bias, model);
  EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
  EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> //
-      (std::bind(&MagFactor::evaluateError, &f, _1, none), theta), H1, 1e-7));
+      (std::bind(&MagFactor::evaluateError, &f, std::placeholders::_1, none), theta), H1, 1e-7));

  // MagFactor1
  MagFactor1 f1(1, measured, s, dir, bias, model);
  EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
  EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> //
-      (std::bind(&MagFactor1::evaluateError, &f1, _1, none), nRb), H1, 1e-7));
+      (std::bind(&MagFactor1::evaluateError, &f1, std::placeholders::_1, none), nRb), H1, 1e-7));

  // MagFactor2
  MagFactor2 f2(1, 2, measured, nRb, model);
  EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
  EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor2::evaluateError, &f2, _1, bias, none, none), scaled),//
+      (std::bind(&MagFactor2::evaluateError, &f2, std::placeholders::_1, bias, none, none), scaled),//
      H1, 1e-7));
  EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor2::evaluateError, &f2, scaled, _1, none, none), bias),//
+      (std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),//
      H2, 1e-7));

  // MagFactor2
  MagFactor3 f3(1, 2, 3, measured, nRb, model);
  EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
  EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //
-      (std::bind(&MagFactor3::evaluateError, &f3, _1, dir, bias, none, none, none), s),//
+      (std::bind(&MagFactor3::evaluateError, &f3, std::placeholders::_1, dir, bias, none, none, none), s),//
      H1, 1e-7));
  EXPECT(assert_equal(numericalDerivative11<Vector,Unit3> //
-      (std::bind(&MagFactor3::evaluateError, &f3, s, _1, bias, none, none, none), dir),//
+      (std::bind(&MagFactor3::evaluateError, &f3, s, std::placeholders::_1, bias, none, none, none), dir),//
      H2, 1e-7));
  EXPECT(assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor3::evaluateError, &f3, s, dir, _1, none, none, none), bias),//
+      (std::bind(&MagFactor3::evaluateError, &f3, s, dir, std::placeholders::_1, none, none, none), bias),//
      H3, 1e-7));
}

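The _1 to std::placeholders::_1 edits above are pure qualification fixes. An equivalent and arguably clearer pattern, a sketch rather than what the diff does, hands numericalDerivative11 a lambda instead of std::bind; using the GPSFactor test as a template (factor and T are the objects already defined in that test):

    Matrix expectedH = numericalDerivative11<Vector, Pose3>(
        [&factor](const Pose3& p) { return factor.evaluateError(p, boost::none); },
        T);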
@@ -101,4 +101,4 @@ private:
  }
-};
-
+};
+}
@@ -78,7 +78,8 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double delta, const VectorValues&

  // Compute blended point
  if(verbose) cout << "In blend region with fraction " << tau << " of Newton's method point" << endl;
-  VectorValues blend = (1. - tau) * x_u; axpy(tau, x_n, blend);
+  VectorValues blend = (1. - tau) * x_u;
+  blend += tau * x_n;
  return blend;
}

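The two-line version computes the same dog-leg interpolation between the two step endpoints (x_n is the Newton point, per the log message; x_u the other end of the dog leg):

    \[ x_{\text{blend}} = (1 - \tau)\, x_u + \tau\, x_n, \qquad 0 \le \tau \le 1 . \]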
@@ -246,6 +246,18 @@ struct apply_compose {
    return x.compose(y, H1, H2);
  }
};

+template <>
+struct apply_compose<double> {
+  double operator()(const double& x, const double& y,
+                    OptionalJacobian<1, 1> H1 = boost::none,
+                    OptionalJacobian<1, 1> H2 = boost::none) const {
+    if (H1) H1->setConstant(y);
+    if (H2) H2->setConstant(x);
+    return x * y;
+  }
+};
+
}

// Global methods:
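With this specialization, composing two scalar expressions multiplies them, and the reported Jacobians are the product-rule partials (d(xy)/dx = y, d(xy)/dy = x). A short sketch mirroring the compose4 test added later in this diff:

    #include <gtsam/nonlinear/expressions.h>

    void composeDoubles() {
      gtsam::Key key = 1;
      gtsam::Double_ x(key), y(key);
      gtsam::Double_ product = x * y;  // routed through apply_compose<double>
      (void)product;
    }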
@@ -305,7 +305,7 @@ struct traits<ExpressionFactorN<T, Args...>>
 * \deprecated Prefer the more general ExpressionFactorN<>.
 */
template <typename T, typename A1, typename A2>
-class ExpressionFactor2 : public ExpressionFactorN<T, A1, A2> {
+class GTSAM_DEPRECATED ExpressionFactor2 : public ExpressionFactorN<T, A1, A2> {
 public:
  /// Destructor
  ~ExpressionFactor2() override {}
@@ -142,8 +142,9 @@ class GTSAM_EXPORT GncOptimizer {
   * provides an extra interface for the user to initialize the weightst
   * */
  void setWeights(const Vector w) {
-    if(w.size() != nfg_.size()){
-      throw std::runtime_error("GncOptimizer::setWeights: the number of specified weights"
+    if (size_t(w.size()) != nfg_.size()) {
+      throw std::runtime_error(
+          "GncOptimizer::setWeights: the number of specified weights"
          " does not match the size of the factor graph.");
    }
    weights_ = w;
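A hedged usage sketch of the interface touched here; the GncParams<LevenbergMarquardtParams> template argument and the all-ones initialization are illustrative assumptions, not something this diff prescribes:

    #include <gtsam/nonlinear/GncOptimizer.h>

    gtsam::Values robustSolve(const gtsam::NonlinearFactorGraph& graph,
                              const gtsam::Values& initial) {
      using Params = gtsam::GncParams<gtsam::LevenbergMarquardtParams>;
      gtsam::GncOptimizer<Params> gnc(graph, initial, Params());
      // Must match graph.size(); that is exactly the check tightened above.
      gnc.setWeights(gtsam::Vector::Ones(graph.size()));
      return gnc.optimize();
    }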
@@ -29,9 +29,6 @@ protected:
  GaussianFactor::shared_ptr factor_;
  boost::optional<Values> linearizationPoint_;

-  /** Default constructor - necessary for serialization */
-  LinearContainerFactor() {}
-
  /** direct copy constructor */
  GTSAM_EXPORT LinearContainerFactor(const GaussianFactor::shared_ptr& factor, const boost::optional<Values>& linearizationPoint);

@@ -43,6 +40,9 @@ public:

  typedef boost::shared_ptr<This> shared_ptr;

+  /** Default constructor - necessary for serialization */
+  LinearContainerFactor() {}
+
  /** Primary constructor: store a linear factor with optional linearization point */
  GTSAM_EXPORT LinearContainerFactor(const JacobianFactor& factor, const Values& linearizationPoint = Values());

@@ -80,11 +80,15 @@ Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solut

/* ************************************************************************* */
void Marginals::computeBayesTree() {
+  // The default ordering to use.
+  const Ordering::OrderingType defaultOrderingType = Ordering::COLAMD;
  // Compute BayesTree
-  if(factorization_ == CHOLESKY)
-    bayesTree_ = *graph_.eliminateMultifrontal(EliminatePreferCholesky);
-  else if(factorization_ == QR)
-    bayesTree_ = *graph_.eliminateMultifrontal(EliminateQR);
+  if (factorization_ == CHOLESKY)
+    bayesTree_ = *graph_.eliminateMultifrontal(defaultOrderingType,
+                                               EliminatePreferCholesky);
+  else if (factorization_ == QR)
+    bayesTree_ =
+        *graph_.eliminateMultifrontal(defaultOrderingType, EliminateQR);
}

/* ************************************************************************* */
@@ -131,17 +131,19 @@ protected:
  void computeBayesTree(const Ordering& ordering);

public:
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
  /** \deprecated argument order changed due to removing boost::optional<Ordering> */
-  Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
+  GTSAM_DEPRECATED Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
            const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}

  /** \deprecated argument order changed due to removing boost::optional<Ordering> */
-  Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization,
+  GTSAM_DEPRECATED Marginals(const GaussianFactorGraph& graph, const Values& solution, Factorization factorization,
            const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}

  /** \deprecated argument order changed due to removing boost::optional<Ordering> */
-  Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization,
+  GTSAM_DEPRECATED Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization,
            const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
+#endif

};

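For comparison, the non-deprecated interface takes the factorization directly, with an ordering (if any) before it. A sketch; key stands for a variable actually present in the solution:

    #include <gtsam/nonlinear/Marginals.h>

    gtsam::Matrix covarianceOf(const gtsam::NonlinearFactorGraph& graph,
                               const gtsam::Values& solution, gtsam::Key key) {
      gtsam::Marginals marginals(graph, solution, gtsam::Marginals::CHOLESKY);
      return marginals.marginalCovariance(key);
    }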
@@ -149,7 +149,7 @@ boost::tuple<V, int> nonlinearConjugateGradient(const S &system,
    const V &initial, const NonlinearOptimizerParams &params,
    const bool singleIteration, const bool gradientDescent = false) {

-  // GTSAM_CONCEPT_MANIFOLD_TYPE(V);
+  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)

  size_t iteration = 0;

@@ -219,7 +219,6 @@ protected:
  X value_; /// fixed value for variable

  GTSAM_CONCEPT_MANIFOLD_TYPE(X)
-
  GTSAM_CONCEPT_TESTABLE_TYPE(X)

public:
@@ -114,7 +114,7 @@ double NoiseModelFactor::weight(const Values& c) const {
  if (noiseModel_) {
    const Vector b = unwhitenedError(c);
    check(noiseModel_, b.size());
-    return 0.5 * noiseModel_->weight(b);
+    return noiseModel_->weight(b);
  }
  else
    return 1.0;
@@ -265,15 +265,17 @@ namespace gtsam {

  public:

+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
    /** \deprecated */
-    boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
+    boost::shared_ptr<HessianFactor> GTSAM_DEPRECATED linearizeToHessianFactor(
        const Values& values, boost::none_t, const Dampen& dampen = nullptr) const
      {return linearizeToHessianFactor(values, dampen);}

    /** \deprecated */
-    Values updateCholesky(const Values& values, boost::none_t,
+    Values GTSAM_DEPRECATED updateCholesky(const Values& values, boost::none_t,
        const Dampen& dampen = nullptr) const
      {return updateCholesky(values, dampen);}
+#endif

  };

@@ -147,11 +147,13 @@ VectorValues NonlinearOptimizer::solve(const GaussianFactorGraph& gfg,
  } else if (params.isSequential()) {
    // Sequential QR or Cholesky (decided by params.getEliminationFunction())
    if (params.ordering)
-      delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction(),
-                                      boost::none, params.orderingType)->optimize();
+      delta = gfg.eliminateSequential(*params.ordering,
+                                      params.getEliminationFunction())
+                  ->optimize();
    else
-      delta = gfg.eliminateSequential(params.getEliminationFunction(), boost::none,
-                                      params.orderingType)->optimize();
+      delta = gfg.eliminateSequential(params.orderingType,
+                                      params.getEliminationFunction())
+                  ->optimize();
  } else if (params.isIterative()) {
    // Conjugate Gradient -> needs params.iterativeParams
    if (!params.iterativeParams)
@@ -24,6 +24,7 @@

#pragma once

+#include <gtsam/base/FastDefaultAllocator.h>
#include <gtsam/base/GenericValue.h>
#include <gtsam/base/VectorSpace.h>
#include <gtsam/inference/Key.h>

@@ -62,17 +63,18 @@ namespace gtsam {
  class GTSAM_EXPORT Values {

  private:
    // Internally we store a boost ptr_map, with a ValueCloneAllocator (defined
-    // below) to clone and deallocate the Value objects, and a boost
-    // fast_pool_allocator to allocate map nodes. In this way, all memory is
-    // allocated in a boost memory pool.
+    // below) to clone and deallocate the Value objects, and our compile-flag-
+    // dependent FastDefaultAllocator to allocate map nodes. In this way, the
+    // user defines the allocation details (i.e. optimize for memory pool/arenas
+    // concurrency).
+    typedef internal::FastDefaultAllocator<typename std::pair<const Key, void*>>::type KeyValuePtrPairAllocator;
    typedef boost::ptr_map<
        Key,
        Value,
        std::less<Key>,
        ValueCloneAllocator,
-        boost::fast_pool_allocator<std::pair<const Key, void*> > > KeyValueMap;
+        KeyValuePtrPairAllocator > KeyValueMap;

    // The member to store the values, see just above
    KeyValueMap values_;
@@ -35,8 +35,7 @@ namespace gtsam {
 * zero errors anyway. However, it means that below will only be exact for the correct measurement.
 */
JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
-    const Values& values, double delta = 1e-5) {
-
+                                    const Values& values, double delta = 1e-5) {
  // We will fill a vector of key/Jacobians pairs (a map would sort)
  std::vector<std::pair<Key, Matrix> > jacobians;

@@ -46,24 +45,24 @@ JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,

  // Loop over all variables
  const double one_over_2delta = 1.0 / (2.0 * delta);
-  for(Key key: factor) {
+  for (Key key : factor) {
    // Compute central differences using the values struct.
    VectorValues dX = values.zeroVectors();
    const size_t cols = dX.dim(key);
    Matrix J = Matrix::Zero(rows, cols);
    for (size_t col = 0; col < cols; ++col) {
      Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols);
-      dx[col] = delta;
+      dx(col) = delta;
      dX[key] = dx;
      Values eval_values = values.retract(dX);
      const Eigen::VectorXd left = factor.whitenedError(eval_values);
-      dx[col] = -delta;
+      dx(col) = -delta;
      dX[key] = dx;
      eval_values = values.retract(dX);
      const Eigen::VectorXd right = factor.whitenedError(eval_values);
      J.col(col) = (left - right) * one_over_2delta;
    }
-    jacobians.push_back(std::make_pair(key,J));
+    jacobians.push_back(std::make_pair(key, J));
  }

  // Next step...return JacobianFactor
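What the loop computes, column by column of the local coordinates, is a plain central difference of the whitened error h about the linearization point:

    \[ J_{\cdot j} \approx \frac{h\big(x \oplus \delta e_j\big) - h\big(x \oplus (-\delta e_j)\big)}{2\delta} , \]

with e_j the j-th unit tangent vector and the retraction used two lines above standing in for the plus operator.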
@@ -115,6 +115,10 @@ class Ordering {
  Ordering();
  Ordering(const gtsam::Ordering& other);

+  template <FACTOR_GRAPH = {gtsam::NonlinearFactorGraph,
+                            gtsam::GaussianFactorGraph}>
+  static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph);
+
  // Testable
  void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
      gtsam::DefaultKeyFormatter) const;

@@ -734,7 +738,12 @@ class ISAM2 {
      const gtsam::KeyList& extraReelimKeys,
      bool force_relinearize);

+  gtsam::ISAM2Result update(const gtsam::NonlinearFactorGraph& newFactors,
+                            const gtsam::Values& newTheta,
+                            const gtsam::ISAM2UpdateParams& updateParams);
+
  gtsam::Values getLinearizationPoint() const;
  bool valueExists(gtsam::Key key) const;
  gtsam::Values calculateEstimate() const;
  template <VALUE = {gtsam::Point2, gtsam::Rot2, gtsam::Pose2, gtsam::Point3,
                     gtsam::Rot3, gtsam::Pose3, gtsam::Cal3_S2, gtsam::Cal3DS2,

@@ -744,12 +753,17 @@ class ISAM2 {
                    gtsam::PinholeCamera<gtsam::Cal3Fisheye>,
                    gtsam::PinholeCamera<gtsam::Cal3Unified>, Vector, Matrix}>
  VALUE calculateEstimate(size_t key) const;
-  gtsam::Values calculateBestEstimate() const;
  Matrix marginalCovariance(size_t key) const;
+  gtsam::Values calculateBestEstimate() const;
  gtsam::VectorValues getDelta() const;
  double error(const gtsam::VectorValues& x) const;
  gtsam::NonlinearFactorGraph getFactorsUnsafe() const;
  gtsam::VariableIndex getVariableIndex() const;
  const gtsam::KeySet& getFixedVariables() const;
  gtsam::ISAM2Params params() const;
+
+  void printStats() const;
+  gtsam::VectorValues gradientAtZero() const;
};

#include <gtsam/nonlinear/NonlinearISAM.h>
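A sketch of the newly wrapped update overload on the C++ side; graph and values construction is assumed to happen elsewhere:

    #include <gtsam/nonlinear/ISAM2.h>

    gtsam::Values incrementalStep(gtsam::ISAM2& isam,
                                  const gtsam::NonlinearFactorGraph& newFactors,
                                  const gtsam::Values& newTheta) {
      gtsam::ISAM2UpdateParams updateParams;  // defaults; tweak as needed
      isam.update(newFactors, newTheta, updateParams);
      return isam.calculateEstimate();
    }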
@@ -293,6 +293,19 @@ TEST(Expression, compose3) {
  EXPECT(expected == R3.keys());
}

+/* ************************************************************************* */
+// Test compose with double type (should be multiplication).
+TEST(Expression, compose4) {
+  // Create expression
+  gtsam::Key key = 1;
+  Double_ R1(key), R2(key);
+  Double_ R3 = R1 * R2;
+
+  // Check keys
+  set<Key> expected = list_of(1);
+  EXPECT(expected == R3.keys());
+}
+
/* ************************************************************************* */
// Test with ternary function.
Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1,
@@ -35,37 +35,37 @@ using namespace gtsam::serializationTestHelpers;

/* ************************************************************************* */
// Create GUIDs for Noisemodels
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

/* ************************************************************************* */
// Create GUIDs for factors
-BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>");
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional");
+BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>")
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")


/* ************************************************************************* */
// Export all classes derived from Value
-GTSAM_VALUE_EXPORT(gtsam::Cal3_S2);
-GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler);
-GTSAM_VALUE_EXPORT(gtsam::Point3);
-GTSAM_VALUE_EXPORT(gtsam::Pose3);
-GTSAM_VALUE_EXPORT(gtsam::Rot3);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>);
+GTSAM_VALUE_EXPORT(gtsam::Cal3_S2)
+GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler)
+GTSAM_VALUE_EXPORT(gtsam::Point3)
+GTSAM_VALUE_EXPORT(gtsam::Pose3)
+GTSAM_VALUE_EXPORT(gtsam::Rot3)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>)

namespace detail {
template<class T> struct pack {
@@ -103,7 +103,7 @@ namespace gtsam {
      boost::none, boost::optional<Matrix&> H2 = boost::none) const override {
    T hx = traits<T>::Between(p1, p2, H1, H2); // h(x)
    // manifold equivalent of h(x)-z -> log(z,h(x))
-#ifdef SLOW_BUT_CORRECT_BETWEENFACTOR
+#ifdef GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
    typename traits<T>::ChartJacobian::Jacobian Hlocal;
    Vector rval = traits<T>::Local(measured_, hx, boost::none, (H1 || H2) ? &Hlocal : 0);
    if (H1) *H1 = Hlocal * (*H1);
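Since the macro gained a GTSAM_ prefix, code that opted into the slow-but-correct error path has to define the new name, typically as a compile definition; per translation unit that amounts to:

    #define GTSAM_SLOW_BUT_CORRECT_BETWEENFACTOR
    #include <gtsam/slam/BetweenFactor.h>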
@@ -59,8 +59,8 @@ namespace gtsam {
template<class CAMERA, class LANDMARK>
class GeneralSFMFactor: public NoiseModelFactor2<CAMERA, LANDMARK> {

-  GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA);
-  GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK);
+  GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA)
+  GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK)

  static const int DimC = FixedDimension<CAMERA>::value;
  static const int DimL = FixedDimension<LANDMARK>::value;

@@ -202,7 +202,7 @@ struct traits<GeneralSFMFactor<CAMERA, LANDMARK> > : Testable<
template<class CALIBRATION>
class GeneralSFMFactor2: public NoiseModelFactor3<Pose3, Point3, CALIBRATION> {

-  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION);
+  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
  static const int DimK = FixedDimension<CALIBRATION>::value;

protected:
@@ -9,20 +9,21 @@

namespace gtsam {
/**
- * JacobianFactor for Schur complement that uses the "Nullspace Trick" by Mourikis
+ * JacobianFactor for Schur complement that uses the "Nullspace Trick" by
+ * Mourikis et al.
 *
 * This trick is equivalent to the Schur complement, but can be faster.
- * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are poses,
- * is multiplied by Enull, a matrix that spans the left nullspace of E, i.e.,
- * The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm * mx3 * 3x3)
- * where Enull is an m x (m-3) matrix
- * Then Enull'*E*dp = 0, and
+ * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are
+ * poses, is multiplied by Enull, a matrix that spans the left nullspace of E,
+ * i.e., The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm *
+ * mx3 * 3x3) where Enull is an m x (m-3) matrix Then Enull'*E*dp = 0, and
 * |Enull'*E*dp + Enull'*F*dX - Enull'*b| == |Enull'*F*dX - Enull'*b|
 * Normally F is m x 6*numKeys, and Enull'*F yields an (m-3) x 6*numKeys matrix.
 *
- * The code below assumes that F is block diagonal and is given as a vector of ZDim*D blocks.
- * Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for D=6)
- * Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as a 1x2 * 2x6 mult
+ * The code below assumes that F is block diagonal and is given as a vector of
+ * ZDim*D blocks. Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for
+ * D=6) Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as
+ * a 1x2 * 2x6 multiplication.
 */
template<size_t D, size_t ZDim>
class JacobianFactorSVD: public RegularJacobianFactor<D> {

@@ -37,10 +38,10 @@ public:
  JacobianFactorSVD() {
  }

-  /// Empty constructor with keys
-  JacobianFactorSVD(const KeyVector& keys, //
-      const SharedDiagonal& model = SharedDiagonal()) :
-      Base() {
+  /// Empty constructor with keys.
+  JacobianFactorSVD(const KeyVector& keys,
+                    const SharedDiagonal& model = SharedDiagonal())
+      : Base() {
    Matrix zeroMatrix = Matrix::Zero(0, D);
    Vector zeroVector = Vector::Zero(0);
    std::vector<KeyMatrix> QF;

@@ -51,18 +52,21 @@ public:
  }

  /**
-   * @brief Constructor
-   * Takes the CameraSet derivatives (as ZDim*D blocks of block-diagonal F)
-   * and a reduced point derivative, Enull
-   * and creates a reduced-rank Jacobian factor on the CameraSet
+   * @brief Construct a new JacobianFactorSVD object, createing a reduced-rank
+   * Jacobian factor on the CameraSet.
   *
-   * @Fblocks:
+   * @param keys keys associated with F blocks.
+   * @param Fblocks CameraSet derivatives, ZDim*D blocks of block-diagonal F
+   * @param Enull a reduced point derivative
+   * @param b right-hand side
+   * @param model noise model
   */
-  JacobianFactorSVD(const KeyVector& keys,
-      const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks, const Matrix& Enull,
-      const Vector& b, //
-      const SharedDiagonal& model = SharedDiagonal()) :
-      Base() {
+  JacobianFactorSVD(
+      const KeyVector& keys,
+      const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks,
+      const Matrix& Enull, const Vector& b,
+      const SharedDiagonal& model = SharedDiagonal())
+      : Base() {
    size_t numKeys = Enull.rows() / ZDim;
    size_t m2 = ZDim * numKeys - 3; // TODO: is this not just Enull.rows()?
    // PLAIN nullptr SPACE TRICK

@@ -74,9 +78,8 @@ public:
    QF.reserve(numKeys);
    for (size_t k = 0; k < Fblocks.size(); ++k) {
      Key key = keys[k];
-      QF.push_back(
-          KeyMatrix(key,
-              (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]));
+      QF.emplace_back(
+          key, (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]);
    }
    JacobianFactor::fillTerms(QF, Enull.transpose() * b, model);
  }
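In equation form, the nullspace trick described in the reflowed comment block: with the SVD E = [Erange Enull] S V', Enull'E = 0, so

    \[ \min_{dp,\,dX}\;\|E\,dp + F\,dX - b\|^2 \;\;\longrightarrow\;\; \min_{dX}\;\|E_{\mathrm{null}}^\top F\,dX - E_{\mathrm{null}}^\top b\|^2 , \]

which, as the comment says, removes the point update dp while remaining equivalent to the Schur complement for dX.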