Merge branch 'borglab:develop' into Fix-Cal3Fisheye-Jacobian
commit 8846324b34
@@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB)
 if(TBB_FOUND)
   set(GTSAM_USE_TBB 1)  # This will go into config.h

-  if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
-    message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
-  endif()
+  # if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
+  #   message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
+  # endif()

 if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
   set(TBB_GREATER_EQUAL_2020 1)
388  doc/math.lyx
@@ -5082,6 +5082,394 @@ reference "ex:projection"
\end_inset


\end_layout

\begin_layout Subsection
Derivative of Adjoint
\begin_inset CommandInset label
LatexCommand label
name "subsec:pose3_adjoint_deriv"

\end_inset


\end_layout

\begin_layout Standard
Consider
\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$
\end_inset

 defined as
\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$
\end_inset

.
 The derivative is written (see Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"

\end_inset

):
\end_layout

\begin_layout Standard
\begin_inset Formula
\[
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
\]

\end_inset

First, computing
\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})$
\end_inset

 is easy, as its matrix is simply
\begin_inset Formula $Ad_{T}$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula
\[
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
\]

\end_inset


\end_layout

\begin_layout Standard
\begin_inset Formula
\[
D_{2}f_{(T,\xi_{b})}=Ad_{T}
\]

\end_inset

We will derive
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
\end_inset

 using two approaches.
 In the first, we'll define
\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$
\end_inset

.
 From Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"

\end_inset

,
\end_layout

\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
\end{align*}

\end_inset

Now we can use the definition of the Adjoint representation
\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$
\end_inset

 (aka conjugation by
\begin_inset Formula $g$
\end_inset

), then apply the product rule and simplify:
\end_layout

\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
 & =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
 & =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
 & =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
 & =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
 & =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
D_{1}f_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
\end{align*}

\end_inset

where
\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$
\end_inset

 is the adjoint map of the Lie algebra.
\end_layout

\begin_layout Standard
The second, perhaps more intuitive, way of deriving
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
\end_inset

 is to use the fact that the derivative at the origin
\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$
\end_inset

 holds by definition of the adjoint
\begin_inset Formula $ad_{\xi}$
\end_inset

.
 Then, applying the property
\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$
\end_inset

,
\end_layout

\begin_layout Standard
\begin_inset Formula
\[
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T\cdot I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
\]

\end_inset


\end_layout

\begin_layout Subsection
Derivative of AdjointTranspose
\end_layout

\begin_layout Standard
The transpose of the Adjoint,
\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g^{*}\rightarrow g^{*}}$
\end_inset

, is useful as a way to change the reference frame of vectors in the dual
 space (note the
\begin_inset Formula $^{*}$
\end_inset

 denoting that we are now in the dual space).
 To be more concrete, whereas
\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$
\end_inset

 converts the
\emph on
twist
\emph default

\begin_inset Formula $\xi_{b}$
\end_inset

 from the
\begin_inset Formula $T$
\end_inset

 frame,
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset

 converts the
\emph on
wrench
\emph default

\begin_inset Formula $\xi_{b}^{*}$
\end_inset

 from the
\begin_inset Formula $T$
\end_inset

 frame.
 It's difficult to apply a similar derivation as in Section
\begin_inset CommandInset ref
LatexCommand ref
reference "subsec:pose3_adjoint_deriv"
plural "false"
caps "false"
noprefix "false"

\end_inset

 for the derivative of
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset

 because
\begin_inset Formula $Ad_{T}^{T}$
\end_inset

 cannot be naturally defined as a conjugation, so we resort to crunching
 through the algebra.
 The details are omitted but the result is a form that vaguely resembles
 (but does not exactly match)
\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$
\end_inset

:
\end_layout

\begin_layout Standard
\begin_inset Formula
\begin{align*}
\begin{bmatrix}\omega_{T}\\
v_{T}
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
\hat{v}_{T} & 0
\end{bmatrix}
\end{align*}

\end_inset


\end_layout

\begin_layout Subsection
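The boxed result above, D1 f = -Ad_T * ad(xi_b), can be spot-checked numerically. The following is a minimal sketch, not part of this diff; the pose and twist values are illustrative, and it assumes GTSAM's `numericalDerivative11` helper:

```cpp
#include <gtsam/base/numericalDerivative.h>
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

int main() {
  const Pose3 T(Rot3::RzRyRx(0.3, -0.2, 0.1), Point3(1, 2, 3));
  const Vector6 xi_b = (Vector6() << 0.1, 0.2, 0.3, 4, 5, 6).finished();

  // f(T, xi_b) = Ad_T * xi_b, differentiated in the pose argument only
  auto f = [&](const Pose3& P) { return Vector6(P.AdjointMap() * xi_b); };
  const Matrix6 numericalH = numericalDerivative11<Vector6, Pose3>(f, T);

  // Analytic form from the derivation above: D1 f = -Ad_T * ad(xi_b)
  const Matrix6 analyticH = -T.AdjointMap() * Pose3::adjointMap(xi_b);
  return (numericalH - analyticH).norm() < 1e-5 ? 0 : 1;
}
```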
BIN  doc/math.pdf
Binary file not shown.
@@ -370,4 +370,4 @@ public:
  * the gtsam namespace to be more easily enforced as testable
  */
 #define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup<T>;
-#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup<T> _gtsam_IsLieGroup_##T;
+#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup<T>;
@@ -178,4 +178,4 @@ struct FixedDimension {
 // * the gtsam namespace to be more easily enforced as testable
 // */
 #define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold<T>;
-#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold<T> _gtsam_IsManifold_##T;
+#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold<T>;
@@ -46,28 +46,28 @@ typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> M
 // Create handy typedefs and constants for square-size matrices
 // MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9
 #define GTSAM_MAKE_MATRIX_DEFS(N) \
-typedef Eigen::Matrix<double, N, N> Matrix##N; \
-typedef Eigen::Matrix<double, 1, N> Matrix1##N; \
-typedef Eigen::Matrix<double, 2, N> Matrix2##N; \
-typedef Eigen::Matrix<double, 3, N> Matrix3##N; \
-typedef Eigen::Matrix<double, 4, N> Matrix4##N; \
-typedef Eigen::Matrix<double, 5, N> Matrix5##N; \
-typedef Eigen::Matrix<double, 6, N> Matrix6##N; \
-typedef Eigen::Matrix<double, 7, N> Matrix7##N; \
-typedef Eigen::Matrix<double, 8, N> Matrix8##N; \
-typedef Eigen::Matrix<double, 9, N> Matrix9##N; \
+using Matrix##N = Eigen::Matrix<double, N, N>; \
+using Matrix1##N = Eigen::Matrix<double, 1, N>; \
+using Matrix2##N = Eigen::Matrix<double, 2, N>; \
+using Matrix3##N = Eigen::Matrix<double, 3, N>; \
+using Matrix4##N = Eigen::Matrix<double, 4, N>; \
+using Matrix5##N = Eigen::Matrix<double, 5, N>; \
+using Matrix6##N = Eigen::Matrix<double, 6, N>; \
+using Matrix7##N = Eigen::Matrix<double, 7, N>; \
+using Matrix8##N = Eigen::Matrix<double, 8, N>; \
+using Matrix9##N = Eigen::Matrix<double, 9, N>; \
 static const Eigen::MatrixBase<Matrix##N>::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \
 static const Eigen::MatrixBase<Matrix##N>::ConstantReturnType Z_##N##x##N = Matrix##N::Zero();

-GTSAM_MAKE_MATRIX_DEFS(1);
-GTSAM_MAKE_MATRIX_DEFS(2);
-GTSAM_MAKE_MATRIX_DEFS(3);
-GTSAM_MAKE_MATRIX_DEFS(4);
-GTSAM_MAKE_MATRIX_DEFS(5);
-GTSAM_MAKE_MATRIX_DEFS(6);
-GTSAM_MAKE_MATRIX_DEFS(7);
-GTSAM_MAKE_MATRIX_DEFS(8);
-GTSAM_MAKE_MATRIX_DEFS(9);
+GTSAM_MAKE_MATRIX_DEFS(1)
+GTSAM_MAKE_MATRIX_DEFS(2)
+GTSAM_MAKE_MATRIX_DEFS(3)
+GTSAM_MAKE_MATRIX_DEFS(4)
+GTSAM_MAKE_MATRIX_DEFS(5)
+GTSAM_MAKE_MATRIX_DEFS(6)
+GTSAM_MAKE_MATRIX_DEFS(7)
+GTSAM_MAKE_MATRIX_DEFS(8)
+GTSAM_MAKE_MATRIX_DEFS(9)

 // Matrix expressions for accessing parts of matrices
 typedef Eigen::Block<Matrix> SubMatrix;
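The motivation for dropping the call-site semicolons in this and the following hunks: the macro body already ends in `;`, so writing `GTSAM_MAKE_MATRIX_DEFS(1);` left a stray `;` at namespace scope, which pedantic compilers warn about. A self-contained sketch of the pattern (illustrative macro, not GTSAM source):

```cpp
#include <Eigen/Core>

// The expansion supplies its own trailing ';', so the invocation takes none.
#define MAKE_SQUARE_DEFS(N)                      \
  using Matrix##N = Eigen::Matrix<double, N, N>; \
  static const Matrix##N I_##N##x##N = Matrix##N::Identity();

MAKE_SQUARE_DEFS(3)  // writing "MAKE_SQUARE_DEFS(3);" would add an extra ';'

int main() { return I_3x3(0, 0) == 1.0 ? 0 : 1; }
```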
@@ -173,4 +173,4 @@ namespace gtsam {
  * @deprecated please use BOOST_CONCEPT_ASSERT and
  */
 #define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable<T>;
-#define GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef gtsam::IsTestable<T> _gtsam_Testable_##T;
+#define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable<T>;
@@ -48,18 +48,18 @@ static const Eigen::MatrixBase<Vector3>::ConstantReturnType Z_3x1 = Vector3::Zer
 // Create handy typedefs and constants for vectors with N>3
 // VectorN and Z_Nx1, for N=1..9
 #define GTSAM_MAKE_VECTOR_DEFS(N) \
-typedef Eigen::Matrix<double, N, 1> Vector##N; \
+using Vector##N = Eigen::Matrix<double, N, 1>; \
 static const Eigen::MatrixBase<Vector##N>::ConstantReturnType Z_##N##x1 = Vector##N::Zero();

-GTSAM_MAKE_VECTOR_DEFS(4);
-GTSAM_MAKE_VECTOR_DEFS(5);
-GTSAM_MAKE_VECTOR_DEFS(6);
-GTSAM_MAKE_VECTOR_DEFS(7);
-GTSAM_MAKE_VECTOR_DEFS(8);
-GTSAM_MAKE_VECTOR_DEFS(9);
-GTSAM_MAKE_VECTOR_DEFS(10);
-GTSAM_MAKE_VECTOR_DEFS(11);
-GTSAM_MAKE_VECTOR_DEFS(12);
+GTSAM_MAKE_VECTOR_DEFS(4)
+GTSAM_MAKE_VECTOR_DEFS(5)
+GTSAM_MAKE_VECTOR_DEFS(6)
+GTSAM_MAKE_VECTOR_DEFS(7)
+GTSAM_MAKE_VECTOR_DEFS(8)
+GTSAM_MAKE_VECTOR_DEFS(9)
+GTSAM_MAKE_VECTOR_DEFS(10)
+GTSAM_MAKE_VECTOR_DEFS(11)
+GTSAM_MAKE_VECTOR_DEFS(12)

 typedef Eigen::VectorBlock<Vector> SubVector;
 typedef Eigen::VectorBlock<const Vector> ConstSubVector;
@@ -32,7 +32,7 @@ void testDefaultChart(TestResult& result_,
                       const std::string& name_,
                       const T& value) {

-  GTSAM_CONCEPT_TESTABLE_TYPE(T);
+  GTSAM_CONCEPT_TESTABLE_TYPE(T)

   typedef typename gtsam::DefaultChart<T> Chart;
   typedef typename Chart::vector Vector;
@@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
   // Typedefs
   typedef typename FOREST::Node Node;

-  tbb::task::spawn_root_and_wait(
-      internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
-                                     visitorPost, problemSizeThreshold));
+  internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
+                                 visitorPost, problemSizeThreshold);
 #else
   DepthFirstForest(forest, rootData, visitorPre, visitorPost);
 #endif
@@ -22,7 +22,7 @@
 #include <boost/make_shared.hpp>

 #ifdef GTSAM_USE_TBB
-#include <tbb/task.h>                // tbb::task, tbb::task_list
+#include <tbb/task_group.h>          // tbb::task_group
 #include <tbb/scalable_allocator.h>  // tbb::scalable_allocator

 namespace gtsam {
@@ -34,7 +34,7 @@ namespace gtsam {

   /* ************************************************************************* */
   template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-  class PreOrderTask : public tbb::task
+  class PreOrderTask
   {
   public:
     const boost::shared_ptr<NODE>& treeNode;
@@ -42,28 +42,30 @@ namespace gtsam {
     VISITOR_PRE& visitorPre;
     VISITOR_POST& visitorPost;
     int problemSizeThreshold;
+    tbb::task_group& tg;
     bool makeNewTasks;

-    bool isPostOrderPhase;
+    // Keep track of order phase across multiple calls to the same functor
+    mutable bool isPostOrderPhase;

     PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
                  VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
-                 bool makeNewTasks = true)
+                 tbb::task_group& tg, bool makeNewTasks = true)
       : treeNode(treeNode),
         myData(myData),
         visitorPre(visitorPre),
         visitorPost(visitorPost),
         problemSizeThreshold(problemSizeThreshold),
+        tg(tg),
        makeNewTasks(makeNewTasks),
        isPostOrderPhase(false) {}

-    tbb::task* execute() override
+    void operator()() const
     {
       if(isPostOrderPhase)
       {
         // Run the post-order visitor since this task was recycled to run the post-order visitor
         (void) visitorPost(treeNode, *myData);
-        return nullptr;
       }
       else
       {
@@ -71,14 +73,10 @@ namespace gtsam {
       {
         if(!treeNode->children.empty())
         {
-          // Allocate post-order task as a continuation
-          isPostOrderPhase = true;
-          recycle_as_continuation();
-
           bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);

-          tbb::task* firstChild = 0;
-          tbb::task_list childTasks;
+          // If we have child tasks, start subtasks and wait for them to complete
+          tbb::task_group ctg;
           for(const boost::shared_ptr<NODE>& child: treeNode->children)
           {
             // Process child in a subtask.  Important:  Run visitorPre before calling
@@ -86,37 +84,30 @@ namespace gtsam {
             // allocated an extra child, this causes a TBB error.
             boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
                 tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
-            tbb::task* childTask =
-                new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
-                                                    problemSizeThreshold, overThreshold);
-            if (firstChild)
-              childTasks.push_back(*childTask);
-            else
-              firstChild = childTask;
+            ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
+                                 problemSizeThreshold, ctg, overThreshold));
           }
+          ctg.wait();

-          // If we have child tasks, start subtasks and wait for them to complete
-          set_ref_count((int)treeNode->children.size());
-          spawn(childTasks);
-          return firstChild;
+          // Allocate post-order task as a continuation
+          isPostOrderPhase = true;
+          tg.run(*this);
         }
         else
         {
           // Run the post-order visitor in this task if we have no children
           (void) visitorPost(treeNode, *myData);
-          return nullptr;
         }
       }
       else
       {
         // Process this node and its children in this task
         processNodeRecursively(treeNode, *myData);
-        return nullptr;
       }
     }
   }

-    void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData)
+    void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
     {
       for(const boost::shared_ptr<NODE>& child: node->children)
       {
@@ -131,7 +122,7 @@ namespace gtsam {

   /* ************************************************************************* */
   template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-  class RootTask : public tbb::task
+  class RootTask
   {
   public:
     const ROOTS& roots;
@@ -139,38 +130,31 @@ namespace gtsam {
     VISITOR_PRE& visitorPre;
     VISITOR_POST& visitorPost;
     int problemSizeThreshold;
+    tbb::task_group& tg;
     RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
-      int problemSizeThreshold) :
+      int problemSizeThreshold, tbb::task_group& tg) :
       roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
-      problemSizeThreshold(problemSizeThreshold) {}
+      problemSizeThreshold(problemSizeThreshold), tg(tg) {}

-    tbb::task* execute() override
+    void operator()() const
     {
       typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
       // Create data and tasks for our children
-      tbb::task_list tasks;
       for(const boost::shared_ptr<NODE>& root: roots)
       {
         boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
-        tasks.push_back(*new(allocate_child())
-          PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
+        tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
       }
-      // Set TBB ref count
-      set_ref_count(1 + (int) roots.size());
-      // Spawn tasks
-      spawn_and_wait_for_all(tasks);
-      // Return nullptr
-      return nullptr;
     }
   };

   template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-  RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>&
-  CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
+  void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
   {
     typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
-    return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold);
+    tbb::task_group tg;
+    tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
   }

 }
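The pattern behind this migration, in isolation: oneTBB removed the low-level `tbb::task` API (`allocate_child`, `set_ref_count`, `spawn`), and `tbb::task_group` subsumes that bookkeeping. A self-contained sketch of the replacement idiom (not GTSAM code):

```cpp
#include <tbb/task_group.h>
#include <iostream>

// A copyable callable is handed to task_group::run(); there is no manual
// allocate_child()/set_ref_count()/spawn() bookkeeping as with tbb::task.
int main() {
  int left = 0, right = 0;      // each written by exactly one task
  tbb::task_group tg;
  tg.run([&] { left = 1; });    // enqueue work
  tg.run([&] { right = 2; });
  tg.wait();                    // join: blocks until both tasks finish
  std::cout << left + right << "\n";  // prints 3
  return 0;
}
```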
@@ -35,7 +35,7 @@ class GTSAM_EXPORT PinholeBaseK: public PinholeBase {

 private:

-  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION);
+  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)

   // Get dimensions of calibration type at compile time
   static const int DimK = FixedDimension<CALIBRATION>::value;
@@ -28,7 +28,7 @@ using namespace std;
 namespace gtsam {

 /** instantiate concept checks */
-GTSAM_CONCEPT_POSE_INST(Pose2);
+GTSAM_CONCEPT_POSE_INST(Pose2)

 static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.));
@@ -30,7 +30,7 @@ using std::vector;
 using Point3Pairs = vector<Point3Pair>;

 /** instantiate concept checks */
-GTSAM_CONCEPT_POSE_INST(Pose3);
+GTSAM_CONCEPT_POSE_INST(Pose3)

 /* ************************************************************************* */
 Pose3::Pose3(const Pose2& pose2) :
@@ -63,6 +63,46 @@ Matrix6 Pose3::AdjointMap() const {
   return adj;
 }

+/* ************************************************************************* */
+// Calculate AdjointMap applied to xi_b, with Jacobians
+Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose,
+                       OptionalJacobian<6, 6> H_xib) const {
+  const Matrix6 Ad = AdjointMap();
+
+  // Jacobians
+  // D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b
+  // D2 Ad_T(xi_b) = Ad_T
+  // See docs/math.pdf for more details.
+  // In D1 calculation, we could be more efficient by writing it out, but do not
+  // for readability
+  if (H_pose) *H_pose = -Ad * adjointMap(xi_b);
+  if (H_xib) *H_xib = Ad;
+
+  return Ad * xi_b;
+}
+
+/* ************************************************************************* */
+/// The dual version of Adjoint
+Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose,
+                                OptionalJacobian<6, 6> H_x) const {
+  const Matrix6 Ad = AdjointMap();
+  const Vector6 AdTx = Ad.transpose() * x;
+
+  // Jacobians
+  // See docs/math.pdf for more details.
+  if (H_pose) {
+    const auto w_T_hat = skewSymmetric(AdTx.head<3>()),
+               v_T_hat = skewSymmetric(AdTx.tail<3>());
+    *H_pose << w_T_hat, v_T_hat,  //
+        /*  */ v_T_hat, Z_3x3;
+  }
+  if (H_x) {
+    *H_x = Ad.transpose();
+  }
+
+  return AdTx;
+}
+
 /* ************************************************************************* */
 Matrix6 Pose3::adjointMap(const Vector6& xi) {
   Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));
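A brief usage sketch of the new Jacobian-returning overload (illustrative values, not part of the diff). Per the derivation in doc/math.lyx, the twist Jacobian is the adjoint matrix itself:

```cpp
#include <gtsam/geometry/Pose3.h>

using namespace gtsam;

int main() {
  const Pose3 T(Rot3::RzRyRx(0.1, 0.2, 0.3), Point3(1, 2, 3));
  const Vector6 xi_b = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();

  Matrix6 H_pose, H_xib;
  const Vector6 xi_s = T.Adjoint(xi_b, H_pose, H_xib);  // spatial-frame twist
  (void)xi_s;

  // H_xib should equal AdjointMap() exactly
  return (H_xib - T.AdjointMap()).norm() < 1e-12 ? 0 : 1;
}
```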
@@ -75,7 +115,7 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {

 /* ************************************************************************* */
 Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
-                       OptionalJacobian<6, 6> Hxi) {
+                       OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
   if (Hxi) {
     Hxi->setZero();
     for (int i = 0; i < 6; ++i) {
@@ -86,12 +126,14 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
       Hxi->col(i) = Gi * y;
     }
   }
-  return adjointMap(xi) * y;
+  const Matrix6& ad_xi = adjointMap(xi);
+  if (H_y) *H_y = ad_xi;
+  return ad_xi * y;
 }

 /* ************************************************************************* */
 Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
-                                OptionalJacobian<6, 6> Hxi) {
+                                OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
   if (Hxi) {
     Hxi->setZero();
     for (int i = 0; i < 6; ++i) {
@@ -102,7 +144,9 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
       Hxi->col(i) = GTi * y;
     }
   }
-  return adjointMap(xi).transpose() * y;
+  const Matrix6& adT_xi = adjointMap(xi).transpose();
+  if (H_y) *H_y = adT_xi;
+  return adT_xi * y;
 }

 /* ************************************************************************* */
|
@ -145,15 +145,22 @@ public:
|
|||
* Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame
|
||||
* Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi)
|
||||
*/
|
||||
Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect
|
||||
Matrix6 AdjointMap() const;
|
||||
|
||||
/**
|
||||
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame
|
||||
* Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a
|
||||
* body-fixed velocity, transforming it to the spatial frame
|
||||
* \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$
|
||||
* Note that H_xib = AdjointMap()
|
||||
*/
|
||||
Vector6 Adjoint(const Vector6& xi_b) const {
|
||||
return AdjointMap() * xi_b;
|
||||
} /// FIXME Not tested - marked as incorrect
|
||||
Vector6 Adjoint(const Vector6& xi_b,
|
||||
OptionalJacobian<6, 6> H_this = boost::none,
|
||||
OptionalJacobian<6, 6> H_xib = boost::none) const;
|
||||
|
||||
/// The dual version of Adjoint
|
||||
Vector6 AdjointTranspose(const Vector6& x,
|
||||
OptionalJacobian<6, 6> H_this = boost::none,
|
||||
OptionalJacobian<6, 6> H_x = boost::none) const;
|
||||
|
||||
/**
|
||||
* Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11
|
||||
|
@@ -170,13 +177,14 @@ public:
    * and its inverse transpose in the discrete Euler Poincare' (DEP) operator.
    *
    */
-  static Matrix6 adjointMap(const Vector6 &xi);
+  static Matrix6 adjointMap(const Vector6& xi);

   /**
    * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
    */
-  static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
-                         OptionalJacobian<6, 6> Hxi = boost::none);
+  static Vector6 adjoint(const Vector6& xi, const Vector6& y,
+                         OptionalJacobian<6, 6> Hxi = boost::none,
+                         OptionalJacobian<6, 6> H_y = boost::none);

   // temporary fix for wrappers until case issue is resolved
   static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}
@@ -186,7 +194,8 @@ public:
    * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
    */
   static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
-                                  OptionalJacobian<6, 6> Hxi = boost::none);
+                                  OptionalJacobian<6, 6> Hxi = boost::none,
+                                  OptionalJacobian<6, 6> H_y = boost::none);

   /// Derivative of Expmap
   static Matrix6 ExpmapDerivative(const Vector6& xi);
@@ -72,5 +72,5 @@ private:

 /** Pose Concept macros */
 #define GTSAM_CONCEPT_POSE_INST(T) template class gtsam::PoseConcept<T>;
-#define GTSAM_CONCEPT_POSE_TYPE(T) typedef gtsam::PoseConcept<T> _gtsam_PoseConcept##T;
+#define GTSAM_CONCEPT_POSE_TYPE(T) using _gtsam_PoseConcept##T = gtsam::PoseConcept<T>;
@@ -473,6 +473,9 @@ class Pose3 {
   Vector logmap(const gtsam::Pose3& pose);
   Matrix AdjointMap() const;
   Vector Adjoint(Vector xi) const;
+  Vector AdjointTranspose(Vector xi) const;
   static Matrix adjointMap(Vector xi);
   static Vector adjoint(Vector xi, Vector y);
+  static Matrix adjointMap_(Vector xi);
+  static Vector adjoint_(Vector xi, Vector y);
   static Vector adjointTranspose(Vector xi, Vector y);
@@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full)
   EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6));
 }

+/* ************************************************************************* */
+// Check Adjoint numerical derivatives
+TEST(Pose3, Adjoint_jacobians)
+{
+  Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
+
+  // Sanity check the evaluation against AdjointMap
+  EQUALITY(static_cast<gtsam::Vector>(T.AdjointMap() * xi), T.Adjoint(xi));
+  EQUALITY(static_cast<gtsam::Vector>(T2.AdjointMap() * xi), T2.Adjoint(xi));
+  EQUALITY(static_cast<gtsam::Vector>(T3.AdjointMap() * xi), T3.Adjoint(xi));
+
+  // Check Jacobians
+  Matrix6 actualH1, actualH2, expectedH1, expectedH2;
+  std::function<Vector6(const Pose3&, const Vector6&)> Adjoint_proxy =
+      [&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); };
+
+  T.Adjoint(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi);
+  expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi);
+  EXPECT(assert_equal(expectedH1, actualH1));
+  EXPECT(assert_equal(expectedH2, actualH2));
+
+  T2.Adjoint(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi);
+  expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi);
+  EXPECT(assert_equal(expectedH1, actualH1));
+  EXPECT(assert_equal(expectedH2, actualH2));
+
+  T3.Adjoint(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi);
+  expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi);
+  EXPECT(assert_equal(expectedH1, actualH1));
+  EXPECT(assert_equal(expectedH2, actualH2));
+}
+
+/* ************************************************************************* */
+// Check AdjointTranspose and Jacobians
+TEST(Pose3, AdjointTranspose)
+{
+  Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
+
+  // Check evaluation
+  EQUALITY(static_cast<Vector>(T.AdjointMap().transpose() * xi),
+           T.AdjointTranspose(xi));
+  EQUALITY(static_cast<Vector>(T2.AdjointMap().transpose() * xi),
+           T2.AdjointTranspose(xi));
+  EQUALITY(static_cast<Vector>(T3.AdjointMap().transpose() * xi),
+           T3.AdjointTranspose(xi));
+
+  // Check Jacobians
+  Matrix6 actualH1, actualH2, expectedH1, expectedH2;
+  std::function<Vector6(const Pose3&, const Vector6&)> AdjointTranspose_proxy =
+      [&](const Pose3& T, const Vector6& xi) {
+        return T.AdjointTranspose(xi);
+      };
+
+  T.AdjointTranspose(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi);
+  expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi);
+  EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
+  EXPECT(assert_equal(expectedH2, actualH2));
+
+  T2.AdjointTranspose(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi);
+  expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi);
+  EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
+  EXPECT(assert_equal(expectedH2, actualH2));
+
+  T3.AdjointTranspose(xi, actualH1, actualH2);
+  expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi);
+  expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi);
+  EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
+  EXPECT(assert_equal(expectedH2, actualH2));
+}
+
 /* ************************************************************************* */
 // assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi))
 TEST(Pose3, Adjoint_hat)
@@ -837,16 +912,20 @@ Vector6 testDerivAdjoint(const Vector6& xi, const Vector6& v) {
 }

 TEST( Pose3, adjoint) {
-  Vector expected = testDerivAdjoint(screwPose3::xi, screwPose3::xi);
+  Vector6 v = (Vector6() << 1, 2, 3, 4, 5, 6).finished();
+  Vector expected = testDerivAdjoint(screwPose3::xi, v);

-  Matrix actualH;
-  Vector actual = Pose3::adjoint(screwPose3::xi, screwPose3::xi, actualH);
+  Matrix actualH1, actualH2;
+  Vector actual = Pose3::adjoint(screwPose3::xi, v, actualH1, actualH2);

-  Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
-      testDerivAdjoint, screwPose3::xi, screwPose3::xi, 1e-5);
+  Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
+      testDerivAdjoint, screwPose3::xi, v, 1e-5);
+  Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
+      testDerivAdjoint, screwPose3::xi, v, 1e-5);

   EXPECT(assert_equal(expected,actual,1e-5));
-  EXPECT(assert_equal(numericalH,actualH,1e-5));
+  EXPECT(assert_equal(numericalH1,actualH1,1e-5));
+  EXPECT(assert_equal(numericalH2,actualH2,1e-5));
 }

 /* ************************************************************************* */
@@ -859,14 +938,17 @@ TEST( Pose3, adjointTranspose) {
   Vector v = (Vector(6) << 0.04, 0.05, 0.06, 4.0, 5.0, 6.0).finished();
   Vector expected = testDerivAdjointTranspose(xi, v);

-  Matrix actualH;
-  Vector actual = Pose3::adjointTranspose(xi, v, actualH);
+  Matrix actualH1, actualH2;
+  Vector actual = Pose3::adjointTranspose(xi, v, actualH1, actualH2);

-  Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
+  Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
       testDerivAdjointTranspose, xi, v, 1e-5);
+  Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
+      testDerivAdjointTranspose, xi, v, 1e-5);

   EXPECT(assert_equal(expected,actual,1e-15));
-  EXPECT(assert_equal(numericalH,actualH,1e-5));
+  EXPECT(assert_equal(numericalH1,actualH1,1e-5));
+  EXPECT(assert_equal(numericalH2,actualH2,1e-5));
 }

 /* ************************************************************************* */
@@ -110,7 +110,7 @@ class ClusterTree {
   typedef sharedCluster sharedNode;

   /** concept check */
-  GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
+  GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)

  protected:
   FastVector<sharedNode> roots_;
@@ -36,17 +36,17 @@ namespace gtsam {
       // no Ordering is provided.  When removing optional from VariableIndex, create VariableIndex
       // before creating ordering.
       VariableIndex computedVariableIndex(asDerived());
-      return eliminateSequential(function, computedVariableIndex, orderingType);
+      return eliminateSequential(orderingType, function, computedVariableIndex);
     }
     else {
       // Compute an ordering and call this function again.  We are guaranteed to have a
       // VariableIndex already here because we computed one if needed in the previous 'if' block.
       if (orderingType == Ordering::METIS) {
         Ordering computedOrdering = Ordering::Metis(asDerived());
-        return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
+        return eliminateSequential(computedOrdering, function, variableIndex);
       } else {
         Ordering computedOrdering = Ordering::Colamd(*variableIndex);
-        return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
+        return eliminateSequential(computedOrdering, function, variableIndex);
       }
     }
   }
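The call sites being rewritten in this and the following hunks all follow one rule: when eliminating by ordering *type*, the type now leads the argument list; when eliminating by an explicit Ordering, the deprecated trailing orderingType argument is dropped. A hedged sketch of the resulting call shape (illustrative function, not from the PR):

```cpp
#include <gtsam/linear/GaussianBayesNet.h>
#include <gtsam/linear/GaussianFactorGraph.h>

using namespace gtsam;

// Sketch: select the ordering type up front instead of trailing it.
GaussianBayesNet::shared_ptr eliminateWithColamd(const GaussianFactorGraph& graph) {
  return graph.eliminateSequential(Ordering::COLAMD, EliminateQR);
}
```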
@@ -78,29 +78,31 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  template<class FACTORGRAPH>
-  boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
-  EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
-    OptionalOrderingType orderingType, const Eliminate& function,
-    OptionalVariableIndex variableIndex) const
-  {
-    if(!variableIndex) {
-      // If no VariableIndex provided, compute one and call this function again IMPORTANT: we check
-      // for no variable index first so that it's always computed if we need to call COLAMD because
-      // no Ordering is provided.  When removing optional from VariableIndex, create VariableIndex
-      // before creating ordering.
+  template <class FACTORGRAPH>
+  boost::shared_ptr<
+      typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
+  EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
+      OptionalOrderingType orderingType, const Eliminate& function,
+      OptionalVariableIndex variableIndex) const {
+    if (!variableIndex) {
+      // If no VariableIndex provided, compute one and call this function again
+      // IMPORTANT: we check for no variable index first so that it's always
+      // computed if we need to call COLAMD because no Ordering is provided.
+      // When removing optional from VariableIndex, create VariableIndex before
+      // creating ordering.
       VariableIndex computedVariableIndex(asDerived());
-      return eliminateMultifrontal(function, computedVariableIndex, orderingType);
-    }
-    else {
-      // Compute an ordering and call this function again.  We are guaranteed to have a
-      // VariableIndex already here because we computed one if needed in the previous 'if' block.
+      return eliminateMultifrontal(orderingType, function,
+                                   computedVariableIndex);
+    } else {
+      // Compute an ordering and call this function again.  We are guaranteed to
+      // have a VariableIndex already here because we computed one if needed in
+      // the previous 'if' block.
       if (orderingType == Ordering::METIS) {
         Ordering computedOrdering = Ordering::Metis(asDerived());
-        return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
+        return eliminateMultifrontal(computedOrdering, function, variableIndex);
       } else {
         Ordering computedOrdering = Ordering::Colamd(*variableIndex);
-        return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
+        return eliminateMultifrontal(computedOrdering, function, variableIndex);
       }
     }
   }
@@ -273,7 +275,7 @@ namespace gtsam {
     else
     {
       // No ordering was provided for the unmarginalized variables, so order them with COLAMD.
-      return factorGraph->eliminateSequential(function);
+      return factorGraph->eliminateSequential(Ordering::COLAMD, function);
     }
   }
 }
@@ -340,7 +342,7 @@ namespace gtsam {
     else
     {
       // No ordering was provided for the unmarginalized variables, so order them with COLAMD.
-      return factorGraph->eliminateMultifrontal(function);
+      return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function);
     }
   }
 }
@@ -288,6 +288,7 @@ namespace gtsam {
     FactorGraphType& asDerived() { return static_cast<FactorGraphType&>(*this); }

    public:
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
     /** \deprecated ordering and orderingType shouldn't both be specified */
     boost::shared_ptr<BayesNetType> eliminateSequential(
       const Ordering& ordering,
@@ -339,6 +340,7 @@ namespace gtsam {
       OptionalVariableIndex variableIndex = boost::none) const {
       return marginalMultifrontalBayesTree(variables, function, variableIndex);
     }
+#endif
   };

 }
@@ -81,7 +81,7 @@ namespace gtsam {

   protected:
     /** concept check */
-    GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
+    GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)

     FastVector<sharedNode> roots_;
     FastVector<sharedFactor> remainingFactors_;
@@ -19,7 +19,6 @@
  */

 #include <gtsam/linear/GaussianFactorGraph.h>
 #include <gtsam/linear/VectorValues.h>
 #include <gtsam/linear/GaussianBayesTree.h>
 #include <gtsam/linear/GaussianEliminationTree.h>
 #include <gtsam/linear/GaussianJunctionTree.h>
@@ -290,10 +289,11 @@ namespace gtsam {
     return blocks;
   }

-  /* ************************************************************************* */
+  /* ************************************************************************ */
   VectorValues GaussianFactorGraph::optimize(const Eliminate& function) const {
     gttic(GaussianFactorGraph_optimize);
-    return BaseEliminateable::eliminateMultifrontal(function)->optimize();
+    return BaseEliminateable::eliminateMultifrontal(Ordering::COLAMD, function)
+        ->optimize();
   }

   /* ************************************************************************* */
@@ -504,10 +504,32 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  /** \deprecated */
-  VectorValues GaussianFactorGraph::optimize(boost::none_t,
-                                             const Eliminate& function) const {
-    return optimize(function);
+  void GaussianFactorGraph::printErrors(
+      const VectorValues& values, const std::string& str,
+      const KeyFormatter& keyFormatter,
+      const std::function<bool(const Factor* /*factor*/,
+                               double /*whitenedError*/, size_t /*index*/)>&
+          printCondition) const {
+    cout << str << "size: " << size() << endl << endl;
+    for (size_t i = 0; i < (*this).size(); i++) {
+      const sharedFactor& factor = (*this)[i];
+      const double errorValue =
+          (factor != nullptr ? (*this)[i]->error(values) : .0);
+      if (!printCondition(factor.get(), errorValue, i))
+        continue;  // User-provided filter did not pass
+
+      stringstream ss;
+      ss << "Factor " << i << ": ";
+      if (factor == nullptr) {
+        cout << "nullptr"
+             << "\n";
+      } else {
+        factor->print(ss.str(), keyFormatter);
+        cout << "error = " << errorValue << "\n";
+      }
+      cout << endl;  // only one "endl" at end might be faster, \n for each
+                     // factor
+    }
   }

 } // namespace gtsam
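A usage sketch for the new printErrors (illustrative threshold and names, not part of the diff); the final argument is a filter that decides which factors get printed:

```cpp
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;

// Sketch: print only the factors whose error at x exceeds a threshold.
void reportLargeErrors(const GaussianFactorGraph& graph, const VectorValues& x) {
  graph.printErrors(x, "large errors: ", DefaultKeyFormatter,
                    [](const Factor*, double error, size_t) { return error > 0.5; });
}
```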
@@ -21,12 +21,13 @@

 #pragma once

-#include <gtsam/inference/FactorGraph.h>
 #include <gtsam/inference/EliminateableFactorGraph.h>
+#include <gtsam/inference/FactorGraph.h>
+#include <gtsam/linear/Errors.h>  // Included here instead of fw-declared so we can use Errors::iterator
 #include <gtsam/linear/GaussianFactor.h>
-#include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/HessianFactor.h>
-#include <gtsam/linear/Errors.h> // Included here instead of fw-declared so we can use Errors::iterator
+#include <gtsam/linear/JacobianFactor.h>
 #include <gtsam/linear/VectorValues.h>

 namespace gtsam {
@@ -375,6 +376,14 @@ namespace gtsam {
     /** In-place version e <- A*x that takes an iterator. */
     void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;

+    void printErrors(
+        const VectorValues& x,
+        const std::string& str = "GaussianFactorGraph: ",
+        const KeyFormatter& keyFormatter = DefaultKeyFormatter,
+        const std::function<bool(const Factor* /*factor*/,
+                                 double /*whitenedError*/, size_t /*index*/)>&
+            printCondition =
+                [](const Factor*, double, size_t) { return true; }) const;
+
     /// @}

   private:
@@ -387,9 +396,14 @@ namespace gtsam {

   public:

-    /** \deprecated */
-    VectorValues optimize(boost::none_t,
-      const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
+    /** \deprecated */
+    VectorValues optimize(boost::none_t,
+                          const Eliminate& function =
+                              EliminationTraitsType::DefaultEliminate) const {
+      return optimize(function);
+    }
+#endif

   };
@@ -302,6 +302,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
   void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
                                 gtsam::DefaultKeyFormatter) const;
   void printKeys(string s) const;
+  gtsam::KeyVector& keys() const;
   bool equals(const gtsam::GaussianFactor& lf, double tol) const;
   size_t size() const;
   Vector unweighted_error(const gtsam::VectorValues& c) const;
@@ -401,6 +402,7 @@ class GaussianFactorGraph {
   // error and probability
   double error(const gtsam::VectorValues& c) const;
   double probPrime(const gtsam::VectorValues& c) const;
+  void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;

   gtsam::GaussianFactorGraph clone() const;
   gtsam::GaussianFactorGraph negate() const;
@@ -513,9 +515,9 @@ virtual class GaussianBayesNet {
   size_t size() const;

   // FactorGraph derived interface
   // size_t size() const;
   gtsam::GaussianConditional* at(size_t idx) const;
   gtsam::KeySet keys() const;
   gtsam::KeyVector keyVector() const;
   bool exists(size_t idx) const;

   void saveGraph(const string& s) const;
@@ -39,14 +39,14 @@ using namespace gtsam::serializationTestHelpers;
 /* ************************************************************************* */
 // Export Noisemodels
 // See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")

-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

 /* ************************************************************************* */
 // example noise models
@@ -129,9 +129,9 @@ TEST (Serialization, SharedDiagonal_noiseModels) {

 /* Create GUIDs for factors */
 /* ************************************************************************* */
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional");
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")

 /* ************************************************************************* */
 TEST (Serialization, linear_factors) {
@@ -257,5 +257,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) {
 /// namespace gtsam

 /// Boost serialization export definition for derived class
-BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams)
@@ -353,4 +353,4 @@ struct traits<CombinedImuFactor> : public Testable<CombinedImuFactor> {};
 } // namespace gtsam

 /// Add Boost serialization export key (declaration) for derived class
-BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams)
@@ -88,6 +88,8 @@ virtual class PreintegratedRotationParams {
 virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
   PreintegrationParams(Vector n_gravity);

+  gtsam::Vector n_gravity;
+
   static gtsam::PreintegrationParams* MakeSharedD(double g);
   static gtsam::PreintegrationParams* MakeSharedU(double g);
   static gtsam::PreintegrationParams* MakeSharedD();  // default g = 9.81
@@ -66,7 +66,7 @@ TEST( Rot3AttitudeFactor, Constructor ) {
 /* ************************************************************************* */
 // Export Noisemodels
 // See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic);
+BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic)

 /* ************************************************************************* */
 TEST(Rot3AttitudeFactor, Serialization) {
@@ -72,7 +72,7 @@ TEST( GPSFactor, Constructor ) {

   // Calculate numerical derivatives
   Matrix expectedH = numericalDerivative11<Vector,Pose3>(
-      std::bind(&GPSFactor::evaluateError, &factor, _1, boost::none), T);
+      std::bind(&GPSFactor::evaluateError, &factor, std::placeholders::_1, boost::none), T);

   // Use the factor to calculate the derivative
   Matrix actualH;
@@ -101,7 +101,7 @@ TEST( GPSFactor2, Constructor ) {

   // Calculate numerical derivatives
   Matrix expectedH = numericalDerivative11<Vector,NavState>(
-      std::bind(&GPSFactor2::evaluateError, &factor, _1, boost::none), T);
+      std::bind(&GPSFactor2::evaluateError, &factor, std::placeholders::_1, boost::none), T);

   // Use the factor to calculate the derivative
   Matrix actualH;
@@ -31,16 +31,16 @@ using namespace gtsam;
 using namespace gtsam::serializationTestHelpers;

 BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained,
-                        "gtsam_noiseModel_Constrained");
+                        "gtsam_noiseModel_Constrained")
 BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal,
-                        "gtsam_noiseModel_Diagonal");
+                        "gtsam_noiseModel_Diagonal")
 BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian,
-                        "gtsam_noiseModel_Gaussian");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
+                        "gtsam_noiseModel_Gaussian")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
 BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,
-                        "gtsam_noiseModel_Isotropic");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+                        "gtsam_noiseModel_Isotropic")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

 template <typename P>
 P getPreintegratedMeasurements() {
@@ -64,7 +64,7 @@ TEST( MagFactor, unrotate ) {
   Point3 expected(22735.5, 314.502, 44202.5);
   EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1));
   EXPECT( assert_equal(numericalDerivative11<Point3,Rot2> //
-      (std::bind(&MagFactor::unrotate, _1, nM, none), theta), H, 1e-6));
+      (std::bind(&MagFactor::unrotate, std::placeholders::_1, nM, none), theta), H, 1e-6));
 }

 // *************************************************************************
@@ -76,35 +76,35 @@ TEST( MagFactor, Factors ) {
   MagFactor f(1, measured, s, dir, bias, model);
   EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
   EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> //
-      (std::bind(&MagFactor::evaluateError, &f, _1, none), theta), H1, 1e-7));
+      (std::bind(&MagFactor::evaluateError, &f, std::placeholders::_1, none), theta), H1, 1e-7));

   // MagFactor1
   MagFactor1 f1(1, measured, s, dir, bias, model);
   EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
   EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> //
-      (std::bind(&MagFactor1::evaluateError, &f1, _1, none), nRb), H1, 1e-7));
+      (std::bind(&MagFactor1::evaluateError, &f1, std::placeholders::_1, none), nRb), H1, 1e-7));

   // MagFactor2
   MagFactor2 f2(1, 2, measured, nRb, model);
   EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
   EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor2::evaluateError, &f2, _1, bias, none, none), scaled),//
+      (std::bind(&MagFactor2::evaluateError, &f2, std::placeholders::_1, bias, none, none), scaled),//
       H1, 1e-7));
   EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor2::evaluateError, &f2, scaled, _1, none, none), bias),//
+      (std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),//
       H2, 1e-7));

   // MagFactor2
   MagFactor3 f3(1, 2, 3, measured, nRb, model);
   EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
   EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //
-      (std::bind(&MagFactor3::evaluateError, &f3, _1, dir, bias, none, none, none), s),//
+      (std::bind(&MagFactor3::evaluateError, &f3, std::placeholders::_1, dir, bias, none, none, none), s),//
       H1, 1e-7));
   EXPECT(assert_equal(numericalDerivative11<Vector,Unit3> //
-      (std::bind(&MagFactor3::evaluateError, &f3, s, _1, bias, none, none, none), dir),//
+      (std::bind(&MagFactor3::evaluateError, &f3, s, std::placeholders::_1, bias, none, none, none), dir),//
       H2, 1e-7));
   EXPECT(assert_equal(numericalDerivative11<Vector,Point3> //
-      (std::bind(&MagFactor3::evaluateError, &f3, s, dir, _1, none, none, none), bias),//
+      (std::bind(&MagFactor3::evaluateError, &f3, s, dir, std::placeholders::_1, none, none, none), bias),//
       H3, 1e-7));
 }
@@ -101,4 +101,4 @@ private:
   }
 };

 };
 }
@@ -246,6 +246,18 @@ struct apply_compose {
     return x.compose(y, H1, H2);
   }
 };

+template <>
+struct apply_compose<double> {
+  double operator()(const double& x, const double& y,
+                    OptionalJacobian<1, 1> H1 = boost::none,
+                    OptionalJacobian<1, 1> H2 = boost::none) const {
+    if (H1) H1->setConstant(y);
+    if (H2) H2->setConstant(x);
+    return x * y;
+  }
+};
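As a quick sanity check on the new specialization (an annotation added here, not part of the diff), the 1x1 Jacobians it fills in are just the product rule:

```latex
f(x, y) = x\,y, \qquad
H_1 = \frac{\partial f}{\partial x} = y, \qquad
H_2 = \frac{\partial f}{\partial y} = x
```

which is exactly why `H1->setConstant(y)` and `H2->setConstant(x)` are correct.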
 }

 // Global methods:
@@ -142,8 +142,9 @@ class GTSAM_EXPORT GncOptimizer {
   * provides an extra interface for the user to initialize the weights
   * */
  void setWeights(const Vector w) {
-   if(w.size() != nfg_.size()){
-     throw std::runtime_error("GncOptimizer::setWeights: the number of specified weights"
+   if (size_t(w.size()) != nfg_.size()) {
+     throw std::runtime_error(
+         "GncOptimizer::setWeights: the number of specified weights"
          " does not match the size of the factor graph.");
    }
    weights_ = w;
@@ -80,11 +80,15 @@ Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solution

 /* ************************************************************************* */
 void Marginals::computeBayesTree() {
+  // The default ordering to use.
+  const Ordering::OrderingType defaultOrderingType = Ordering::COLAMD;
   // Compute BayesTree
-  if(factorization_ == CHOLESKY)
-    bayesTree_ = *graph_.eliminateMultifrontal(EliminatePreferCholesky);
-  else if(factorization_ == QR)
-    bayesTree_ = *graph_.eliminateMultifrontal(EliminateQR);
+  if (factorization_ == CHOLESKY)
+    bayesTree_ = *graph_.eliminateMultifrontal(defaultOrderingType,
+                                               EliminatePreferCholesky);
+  else if (factorization_ == QR)
+    bayesTree_ =
+        *graph_.eliminateMultifrontal(defaultOrderingType, EliminateQR);
 }

 /* ************************************************************************* */
@@ -131,6 +131,7 @@ protected:
   void computeBayesTree(const Ordering& ordering);

  public:
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
   /** \deprecated argument order changed due to removing boost::optional<Ordering> */
   Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
             const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
@@ -142,6 +143,7 @@ public:
   /** \deprecated argument order changed due to removing boost::optional<Ordering> */
   Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization,
             const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
+#endif

 };
@@ -149,7 +149,7 @@ boost::tuple<V, int> nonlinearConjugateGradient(const S &system,
     const V &initial, const NonlinearOptimizerParams &params,
     const bool singleIteration, const bool gradientDescent = false) {

-  // GTSAM_CONCEPT_MANIFOLD_TYPE(V);
+  // GTSAM_CONCEPT_MANIFOLD_TYPE(V)

   size_t iteration = 0;
@@ -219,7 +219,6 @@ protected:
   X value_; /// fixed value for variable

   GTSAM_CONCEPT_MANIFOLD_TYPE(X)
-
   GTSAM_CONCEPT_TESTABLE_TYPE(X)

  public:
@@ -265,6 +265,7 @@ namespace gtsam {

   public:

+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
    /** \deprecated */
    boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
        const Values& values, boost::none_t, const Dampen& dampen = nullptr) const
@@ -274,6 +275,7 @@ namespace gtsam {
    Values updateCholesky(const Values& values, boost::none_t,
                          const Dampen& dampen = nullptr) const
    {return updateCholesky(values, dampen);}
+#endif

   };
@@ -147,11 +147,13 @@ VectorValues NonlinearOptimizer::solve(const GaussianFactorGraph& gfg,
   } else if (params.isSequential()) {
     // Sequential QR or Cholesky (decided by params.getEliminationFunction())
     if (params.ordering)
-      delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction(),
-                                      boost::none, params.orderingType)->optimize();
+      delta = gfg.eliminateSequential(*params.ordering,
+                                      params.getEliminationFunction())
+                  ->optimize();
     else
-      delta = gfg.eliminateSequential(params.getEliminationFunction(), boost::none,
-                                      params.orderingType)->optimize();
+      delta = gfg.eliminateSequential(params.orderingType,
+                                      params.getEliminationFunction())
+                  ->optimize();
   } else if (params.isIterative()) {
     // Conjugate Gradient -> needs params.iterativeParams
     if (!params.iterativeParams)
@@ -35,8 +35,7 @@ namespace gtsam {
  * zero errors anyway. However, it means that below will only be exact for the correct measurement.
  */
 JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
-    const Values& values, double delta = 1e-5) {
-
+    const Values& values, double delta = 1e-5) {
   // We will fill a vector of key/Jacobians pairs (a map would sort)
   std::vector<std::pair<Key, Matrix> > jacobians;
@@ -46,24 +45,24 @@ JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,

   // Loop over all variables
   const double one_over_2delta = 1.0 / (2.0 * delta);
-  for(Key key: factor) {
+  for (Key key : factor) {
     // Compute central differences using the values struct.
     VectorValues dX = values.zeroVectors();
     const size_t cols = dX.dim(key);
     Matrix J = Matrix::Zero(rows, cols);
     for (size_t col = 0; col < cols; ++col) {
       Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols);
-      dx[col] = delta;
+      dx(col) = delta;
       dX[key] = dx;
       Values eval_values = values.retract(dX);
       const Eigen::VectorXd left = factor.whitenedError(eval_values);
-      dx[col] = -delta;
+      dx(col) = -delta;
       dX[key] = dx;
       eval_values = values.retract(dX);
       const Eigen::VectorXd right = factor.whitenedError(eval_values);
       J.col(col) = (left - right) * one_over_2delta;
     }
-    jacobians.push_back(std::make_pair(key,J));
+    jacobians.push_back(std::make_pair(key, J));
   }

   // Next step...return JacobianFactor
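For reference (an annotation, not part of the diff): each column computed by this loop is the standard central difference of the whitened error `h`, taken on the manifold via `retract`:

```latex
J_{:,\mathrm{col}} \approx
\frac{h\!\left(x \oplus \delta\, e_{\mathrm{col}}\right)
    - h\!\left(x \oplus (-\delta\, e_{\mathrm{col}})\right)}{2\delta}
```

where `e_col` is the unit tangent vector selected by `dx(col)`.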
@@ -115,6 +115,10 @@ class Ordering {
   Ordering();
   Ordering(const gtsam::Ordering& other);

+  template <FACTOR_GRAPH = {gtsam::NonlinearFactorGraph,
+                            gtsam::GaussianFactorGraph}>
+  static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph);
+
   // Testable
   void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
                  gtsam::DefaultKeyFormatter) const;
@@ -293,6 +293,19 @@ TEST(Expression, compose3) {
   EXPECT(expected == R3.keys());
 }

+/* ************************************************************************* */
+// Test compose with double type (should be multiplication).
+TEST(Expression, compose4) {
+  // Create expression
+  gtsam::Key key = 1;
+  Double_ R1(key), R2(key);
+  Double_ R3 = R1 * R2;
+
+  // Check keys
+  set<Key> expected = list_of(1);
+  EXPECT(expected == R3.keys());
+}
+
 /* ************************************************************************* */
 // Test with ternary function.
 Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1,
@@ -35,37 +35,37 @@ using namespace gtsam::serializationTestHelpers;

 /* ************************************************************************* */
 // Create GUIDs for Noisemodels
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

 /* ************************************************************************* */
 // Create GUIDs for factors
-BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>");
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional");
+BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>")
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")


 /* ************************************************************************* */
 // Export all classes derived from Value
-GTSAM_VALUE_EXPORT(gtsam::Cal3_S2);
-GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler);
-GTSAM_VALUE_EXPORT(gtsam::Point3);
-GTSAM_VALUE_EXPORT(gtsam::Pose3);
-GTSAM_VALUE_EXPORT(gtsam::Rot3);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>);
-GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>);
+GTSAM_VALUE_EXPORT(gtsam::Cal3_S2)
+GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler)
+GTSAM_VALUE_EXPORT(gtsam::Point3)
+GTSAM_VALUE_EXPORT(gtsam::Pose3)
+GTSAM_VALUE_EXPORT(gtsam::Rot3)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>)
+GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>)

 namespace detail {
 template<class T> struct pack {
@@ -59,8 +59,8 @@ namespace gtsam {
 template<class CAMERA, class LANDMARK>
 class GeneralSFMFactor: public NoiseModelFactor2<CAMERA, LANDMARK> {

-  GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA);
-  GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK);
+  GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA)
+  GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK)

   static const int DimC = FixedDimension<CAMERA>::value;
   static const int DimL = FixedDimension<LANDMARK>::value;

@@ -202,7 +202,7 @@ struct traits<GeneralSFMFactor<CAMERA, LANDMARK> > : Testable<
 template<class CALIBRATION>
 class GeneralSFMFactor2: public NoiseModelFactor3<Pose3, Point3, CALIBRATION> {

-  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION);
+  GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
   static const int DimK = FixedDimension<CALIBRATION>::value;

 protected:
@@ -9,20 +9,21 @@

 namespace gtsam {
 /**
- * JacobianFactor for Schur complement that uses the "Nullspace Trick" by Mourikis
+ * JacobianFactor for Schur complement that uses the "Nullspace Trick" by
+ * Mourikis et al.
  *
  * This trick is equivalent to the Schur complement, but can be faster.
- * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are poses,
- * is multiplied by Enull, a matrix that spans the left nullspace of E, i.e.,
- * The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm * mx3 * 3x3)
- * where Enull is an m x (m-3) matrix
- * Then Enull'*E*dp = 0, and
+ * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are
+ * poses, is multiplied by Enull, a matrix that spans the left nullspace of E,
+ * i.e., The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm *
+ * mx3 * 3x3) where Enull is an m x (m-3) matrix Then Enull'*E*dp = 0, and
  * |Enull'*E*dp + Enull'*F*dX - Enull'*b| == |Enull'*F*dX - Enull'*b|
  * Normally F is m x 6*numKeys, and Enull'*F yields an (m-3) x 6*numKeys matrix.
  *
- * The code below assumes that F is block diagonal and is given as a vector of ZDim*D blocks.
- * Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for D=6)
- * Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as a 1x2 * 2x6 mult
+ * The code below assumes that F is block diagonal and is given as a vector of
+ * ZDim*D blocks. Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for
+ * D=6) Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as
+ * a 1x2 * 2x6 multiplication.
  */
 template<size_t D, size_t ZDim>
 class JacobianFactorSVD: public RegularJacobianFactor<D> {
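To make the comment block above concrete, here is a minimal, self-contained Eigen sketch of the nullspace trick (illustration only; the matrix sizes follow the example in the comment, and this code is not part of GTSAM or of this diff):

```cpp
#include <Eigen/Dense>
#include <iostream>

int main() {
  const int m = 4;  // 2 measurements with ZDim = 2
  const Eigen::MatrixXd E = Eigen::MatrixXd::Random(m, 3);   // point Jacobian
  const Eigen::MatrixXd F = Eigen::MatrixXd::Random(m, 12);  // block-diagonal in practice (D = 6)
  const Eigen::VectorXd b = Eigen::VectorXd::Random(m);

  // Full SVD gives U = [Erange Enull]; the last m-3 columns span the
  // left nullspace of E.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(E, Eigen::ComputeFullU);
  const Eigen::MatrixXd Enull = svd.matrixU().rightCols(m - 3);

  // Enull'*E*dp vanishes, so the point is eliminated from the factor:
  std::cout << "||Enull'E|| = " << (Enull.transpose() * E).norm() << "\n";

  // Reduced factor |Enull'*F*dX - Enull'*b| only involves the poses.
  const Eigen::MatrixXd Fred = Enull.transpose() * F;  // (m-3) x 12
  const Eigen::VectorXd bred = Enull.transpose() * b;
  std::cout << "reduced system: " << Fred.rows() << " rows, "
            << bred.size() << " rhs entries\n";
  return 0;
}
```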
@@ -37,10 +38,10 @@ public:
   JacobianFactorSVD() {
   }

-  /// Empty constructor with keys
-  JacobianFactorSVD(const KeyVector& keys, //
-      const SharedDiagonal& model = SharedDiagonal()) :
-      Base() {
+  /// Empty constructor with keys.
+  JacobianFactorSVD(const KeyVector& keys,
+                    const SharedDiagonal& model = SharedDiagonal())
+      : Base() {
     Matrix zeroMatrix = Matrix::Zero(0, D);
     Vector zeroVector = Vector::Zero(0);
     std::vector<KeyMatrix> QF;
@@ -51,18 +52,21 @@ public:
   }

   /**
-   * @brief Constructor
-   * Takes the CameraSet derivatives (as ZDim*D blocks of block-diagonal F)
-   * and a reduced point derivative, Enull
-   * and creates a reduced-rank Jacobian factor on the CameraSet
+   * @brief Construct a new JacobianFactorSVD object, creating a reduced-rank
+   * Jacobian factor on the CameraSet.
    *
-   * @Fblocks:
+   * @param keys keys associated with F blocks.
+   * @param Fblocks CameraSet derivatives, ZDim*D blocks of block-diagonal F
+   * @param Enull a reduced point derivative
+   * @param b right-hand side
+   * @param model noise model
    */
-  JacobianFactorSVD(const KeyVector& keys,
-      const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks, const Matrix& Enull,
-      const Vector& b, //
-      const SharedDiagonal& model = SharedDiagonal()) :
-      Base() {
+  JacobianFactorSVD(
+      const KeyVector& keys,
+      const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks,
+      const Matrix& Enull, const Vector& b,
+      const SharedDiagonal& model = SharedDiagonal())
+      : Base() {
     size_t numKeys = Enull.rows() / ZDim;
     size_t m2 = ZDim * numKeys - 3;  // TODO: is this not just Enull.rows()?
     // PLAIN nullptr SPACE TRICK

@@ -74,9 +78,8 @@ public:
     QF.reserve(numKeys);
     for (size_t k = 0; k < Fblocks.size(); ++k) {
       Key key = keys[k];
-      QF.push_back(
-          KeyMatrix(key,
-              (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]));
+      QF.emplace_back(
+          key, (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]);
     }
     JacobianFactor::fillTerms(QF, Enull.transpose() * b, model);
   }
@@ -11,7 +11,7 @@

 /**
  * @file ProjectionFactor.h
- * @brief Basic bearing factor from 2D measurement
+ * @brief Reprojection of a LANDMARK to a 2D point.
  * @author Chris Beall
  * @author Richard Roberts
  * @author Frank Dellaert
@@ -22,17 +22,21 @@

 #include <gtsam/nonlinear/NonlinearFactor.h>
 #include <gtsam/geometry/PinholeCamera.h>
+#include <gtsam/geometry/Pose3.h>
+#include <gtsam/geometry/Point3.h>
 #include <gtsam/geometry/Cal3_S2.h>
 #include <boost/optional.hpp>

 namespace gtsam {

 /**
- * Non-linear factor for a constraint derived from a 2D measurement. The calibration is known here.
- * i.e. the main building block for visual SLAM.
+ * Non-linear factor for a constraint derived from a 2D measurement.
+ * The calibration is known here.
+ * The main building block for visual SLAM.
  * @addtogroup SLAM
  */
-template<class POSE, class LANDMARK, class CALIBRATION = Cal3_S2>
+template <class POSE = Pose3, class LANDMARK = Point3,
+          class CALIBRATION = Cal3_S2>
 class GenericProjectionFactor: public NoiseModelFactor2<POSE, LANDMARK> {
 protected:
@@ -57,9 +61,9 @@ namespace gtsam {
     typedef boost::shared_ptr<This> shared_ptr;

     /// Default constructor
-    GenericProjectionFactor() :
-        measured_(0, 0), throwCheirality_(false), verboseCheirality_(false) {
-    }
+    GenericProjectionFactor() :
+        measured_(0, 0), throwCheirality_(false), verboseCheirality_(false) {
+    }

     /**
      * Constructor
@@ -0,0 +1,68 @@
# SLAM Factors

## GenericProjectionFactor (defined in ProjectionFactor.h)

Non-linear factor that minimizes the re-projection error with respect to a 2D measurement.
The calibration is assumed known and passed in the constructor.
The main building block for visual SLAM.

Templated on
- `POSE`, default `Pose3`
- `LANDMARK`, default `Point3`
- `CALIBRATION`, default `Cal3_S2`

## SmartFactors

These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras.
While one typically adds multiple GenericProjectionFactors (one for each observation of a landmark), a SmartFactor collects all measurements for a landmark, i.e., the factor graph contains 1 smart factor per landmark.

### SmartFactorBase

This is the base class for smart factors, templated on a `CAMERA` type.
It has no internal point, but it saves the measurements, keeps a noise model, and an optional sensor pose.

### SmartProjectionFactor

Also templated on `CAMERA`. Triangulates a 3D point and keeps an estimate of it around.
This factor operates with monocular cameras, is used to optimize the camera pose
*and* calibration for each camera, and requires variables of type `CAMERA` in values.

If the calibration is fixed, use `SmartProjectionPoseFactor` instead!

### SmartProjectionPoseFactor

Derives from `SmartProjectionFactor` but is templated on a `CALIBRATION` type, setting `CAMERA = PinholePose<CALIBRATION>`.
This factor assumes that the camera calibration is fixed and the same for all cameras involved in this factor.
The factor only constrains poses.

If the calibration should be optimized as well, use `SmartProjectionFactor` instead!

### SmartProjectionRigFactor

Same as `SmartProjectionPoseFactor`, except:
- it is templated on `CAMERA`, i.e., it allows cameras beyond pinhole;
- it allows measurements from multiple cameras, each camera with fixed but potentially different intrinsics and extrinsics;
- it allows multiple observations from the same pose/key, again, to model a multi-camera system.

## Linearized Smart Factors

The factors below are less likely to be relevant to the user, but result from using the non-linear smart factors above.

### RegularImplicitSchurFactor

A specialization of a GaussianFactor to structure-less SFM, which is very fast in a conjugate gradient (CG) solver.
It is produced by calling `createRegularImplicitSchurFactor` in `SmartFactorBase` or `SmartProjectionFactor`.

### JacobianFactorQ

A RegularJacobianFactor that uses some badly documented reduction on the Jacobians.

### JacobianFactorQR

A RegularJacobianFactor that eliminates a point using sequential elimination.

### JacobianFactorSVD

A RegularJacobianFactor that uses the "Nullspace Trick" by Mourikis et al. See the documentation in the file, which *is* well documented.
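To connect the README above to code, a minimal usage sketch for `SmartProjectionPoseFactor` (a sketch with made-up calibration, sigma, and pixel values; not part of the diff): one smart factor per landmark, constraining only poses, with no landmark variable ever created.

```cpp
#include <boost/make_shared.hpp>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Point2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>

using namespace gtsam;

int main() {
  // Fixed, shared calibration (assumed values).
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto pixelNoise = noiseModel::Isotropic::Sigma(2, 1.0);  // 1 pixel std-dev

  // One smart factor collects all observations of one landmark.
  auto smartFactor =
      boost::make_shared<SmartProjectionPoseFactor<Cal3_S2> >(pixelNoise, K);
  smartFactor->add(Point2(320.0, 240.0), Symbol('x', 0));
  smartFactor->add(Point2(315.5, 242.1), Symbol('x', 1));

  NonlinearFactorGraph graph;
  graph.push_back(smartFactor);  // no landmark variable is introduced
  return 0;
}
```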
@@ -1,6 +1,6 @@
 /**
  * @file RegularImplicitSchurFactor.h
- * @brief A new type of linear factor (GaussianFactor), which is subclass of GaussianFactor
+ * @brief A subclass of GaussianFactor specialized to structureless SFM.
  * @author Frank Dellaert
  * @author Luca Carlone
  */
@@ -20,6 +20,20 @@ namespace gtsam {

 /**
  * RegularImplicitSchurFactor
+ *
+ * A specialization of a GaussianFactor to structure-less SFM, which is very
+ * fast in a conjugate gradient (CG) solver. Specifically, as measured in
+ * timeSchurFactors.cpp, it stays very fast for an increasing number of cameras.
+ * The magic is in multiplyHessianAdd, which does the Hessian-vector multiply at
+ * the core of CG, and implements
+ *   y += F'*alpha*(I - E*P*E')*F*x
+ * where
+ * - F is the 2mx6m Jacobian of the m 2D measurements wrpt m 6DOF poses
+ * - E is the 2mx3 Jacobian of the m 2D measurements wrpt a 3D point
+ * - P is the covariance on the point
+ * The equation above implicitly executes the Schur complement by removing the
+ * information E*P*E' from the Hessian. It is also very fast as we do not use
+ * the full 6m*6m F matrix, but rather only its m 6x6 diagonal blocks.
  */
 template<class CAMERA>
 class RegularImplicitSchurFactor: public GaussianFactor {
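The Hessian-vector product described in this new comment can be written out densely; the following Eigen sketch (illustrative only, not part of the factor, which never forms the full F) shows the operation `y += F'*alpha*(I - E*P*E')*F*x`:

```cpp
#include <Eigen/Dense>

// Dense illustration of the implicit Schur-complement multiply.
// The real factor streams over the m ZDim x D diagonal blocks of F instead.
void multiplyHessianAddDense(double alpha, const Eigen::MatrixXd& F,
                             const Eigen::MatrixXd& E, const Eigen::VectorXd& x,
                             Eigen::VectorXd& y) {
  // P = inv(E'E): point covariance (3x3 for a Point3 landmark).
  const Eigen::MatrixXd P = (E.transpose() * E).inverse();
  const Eigen::VectorXd Fx = F * x;  // into measurement space (2m)
  const Eigen::VectorXd projected = Fx - E * (P * (E.transpose() * Fx));
  y += alpha * (F.transpose() * projected);  // back to pose space (6m)
}
```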
@@ -38,9 +52,10 @@ protected:
   static const int ZDim = traits<Z>::dimension;  ///< Measurement dimension

   typedef Eigen::Matrix<double, ZDim, D> MatrixZD;  ///< type of an F block
-  typedef Eigen::Matrix<double, D, D> MatrixDD;  ///< camera hessian
+  typedef Eigen::Matrix<double, D, D> MatrixDD;  ///< camera Hessian
+  typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> > FBlocks;

-  const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> > FBlocks_; ///< All ZDim*D F blocks (one for each camera)
+  FBlocks FBlocks_;  ///< All ZDim*D F blocks (one for each camera)
   const Matrix PointCovariance_;  ///< the 3*3 matrix P = inv(E'E) (2*2 if degenerate)
   const Matrix E_;  ///< The 2m*3 E Jacobian with respect to the point
   const Vector b_;  ///< 2m-dimensional RHS vector
@@ -52,17 +67,25 @@ public:
   }

-  /// Construct from blocks of F, E, inv(E'*E), and RHS vector b
-  RegularImplicitSchurFactor(const KeyVector& keys,
-      const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& FBlocks, const Matrix& E, const Matrix& P,
-      const Vector& b) :
-      GaussianFactor(keys), FBlocks_(FBlocks), PointCovariance_(P), E_(E), b_(b) {
-  }
+  /**
+   * @brief Construct a new RegularImplicitSchurFactor object.
+   *
+   * @param keys keys corresponding to cameras
+   * @param Fs All ZDim*D F blocks (one for each camera)
+   * @param E Jacobian of measurements wrpt point.
+   * @param P point covariance matrix
+   * @param b RHS vector
+   */
+  RegularImplicitSchurFactor(const KeyVector& keys, const FBlocks& Fs,
+                             const Matrix& E, const Matrix& P, const Vector& b)
+      : GaussianFactor(keys), FBlocks_(Fs), PointCovariance_(P), E_(E), b_(b) {}

   /// Destructor
   ~RegularImplicitSchurFactor() override {
   }

-  std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& FBlocks() const {
+  const FBlocks& Fs() const {
     return FBlocks_;
   }
@@ -37,12 +37,14 @@
 namespace gtsam {

 /**
- * @brief Base class for smart factors
- * This class mainly computes the derivatives and returns them as a variety of factors.
- * The methods take a Cameras argument, which should behave like PinholeCamera, and
- * the value of a point, which is kept in the base class.
+ * @brief Base class for smart factors.
+ * This base class has no internal point, but it has a measurement, noise model
+ * and an optional sensor pose.
+ * This class mainly computes the derivatives and returns them as a variety of
+ * factors. The methods take a CameraSet<CAMERA> argument and the value of a
+ * point, which is kept in the derived class.
+ *
+ * @tparam CAMERA should behave like a PinholeCamera.
  */
 template<class CAMERA>
 class SmartFactorBase: public NonlinearFactor {
@@ -64,19 +66,20 @@ protected:
   /**
    * As of Feb 22, 2015, the noise model is the same for all measurements and
    * is isotropic. This allows for moving most calculations of Schur complement
-   * etc to be moved to CameraSet very easily, and also agrees pragmatically
+   * etc. to be easily moved to CameraSet, and also agrees pragmatically
    * with what is normally done.
    */
   SharedIsotropic noiseModel_;

   /**
-   * 2D measurement and noise model for each of the m views
-   * We keep a copy of measurements for I/O and computing the error.
+   * Measurements for each of the m views.
+   * We keep a copy of the measurements for I/O and computing the error.
    * The order is kept the same as the keys that we use to create the factor.
    */
   ZVector measured_;

-  boost::optional<Pose3> body_P_sensor_; ///< Pose of the camera in the body frame
+  boost::optional<Pose3>
+      body_P_sensor_;  ///< Pose of the camera in the body frame

   // Cache for Fblocks, to avoid a malloc every time we re-linearize
   mutable FBlocks Fs;
@@ -84,16 +87,16 @@ protected:
  public:
   GTSAM_MAKE_ALIGNED_OPERATOR_NEW

-  /// shorthand for a smart pointer to a factor
+  /// shorthand for a smart pointer to a factor.
   typedef boost::shared_ptr<This> shared_ptr;

-  /// We use the new CameraSte data structure to refer to a set of cameras
+  /// The CameraSet data structure is used to refer to a set of cameras.
   typedef CameraSet<CAMERA> Cameras;

-  /// Default Constructor, for serialization
+  /// Default Constructor, for serialization.
   SmartFactorBase() {}

-  /// Constructor
+  /// Construct with given noise model and optional arguments.
   SmartFactorBase(const SharedNoiseModel& sharedNoiseModel,
       boost::optional<Pose3> body_P_sensor = boost::none,
       size_t expectedNumberCameras = 10)
@@ -111,12 +114,12 @@ protected:
     noiseModel_ = sharedIsotropic;
   }

-  /// Virtual destructor, subclasses from NonlinearFactor
+  /// Virtual destructor, subclasses from NonlinearFactor.
   ~SmartFactorBase() override {
   }

   /**
-   * Add a new measurement and pose/camera key
+   * Add a new measurement and pose/camera key.
    * @param measured is the 2m dimensional projection of a single landmark
    * @param key is the index corresponding to the camera observing the landmark
    */
@@ -129,9 +132,7 @@ protected:
     this->keys_.push_back(key);
   }

-  /**
-   * Add a bunch of measurements, together with the camera keys
-   */
+  /// Add a bunch of measurements, together with the camera keys.
   void add(const ZVector& measurements, const KeyVector& cameraKeys) {
     assert(measurements.size() == cameraKeys.size());
     for (size_t i = 0; i < measurements.size(); i++) {
@@ -140,8 +141,8 @@ protected:
   }

   /**
-   * Adds an entire SfM_track (collection of cameras observing a single point).
-   * The noise is assumed to be the same for all measurements
+   * Add an entire SfM_track (collection of cameras observing a single point).
+   * The noise is assumed to be the same for all measurements.
    */
   template<class SFM_TRACK>
   void add(const SFM_TRACK& trackToAdd) {
@@ -151,17 +152,13 @@ protected:
     }
   }

-  /// get the dimension (number of rows!) of the factor
-  size_t dim() const override {
-    return ZDim * this->measured_.size();
-  }
+  /// Return the dimension (number of rows!) of the factor.
+  size_t dim() const override { return ZDim * this->measured_.size(); }

-  /** return the measurements */
-  const ZVector& measured() const {
-    return measured_;
-  }
+  /// Return the 2D measurements (ZDim, in general).
+  const ZVector& measured() const { return measured_; }

-  /// Collect all cameras: important that in key order
+  /// Collect all cameras: important that in key order.
   virtual Cameras cameras(const Values& values) const {
     Cameras cameras;
     for(const Key& k: this->keys_)
@@ -188,25 +185,30 @@ protected:

   /// equals
   bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
-    const This *e = dynamic_cast<const This*>(&p);
-
-    bool areMeasurementsEqual = true;
-    for (size_t i = 0; i < measured_.size(); i++) {
-      if (traits<Z>::Equals(this->measured_.at(i), e->measured_.at(i), tol) == false)
-        areMeasurementsEqual = false;
-      break;
-    }
-    return e && Base::equals(p, tol) && areMeasurementsEqual;
+    if (const This* e = dynamic_cast<const This*>(&p)) {
+      // Check that all measurements are the same.
+      for (size_t i = 0; i < measured_.size(); i++) {
+        if (!traits<Z>::Equals(this->measured_.at(i), e->measured_.at(i), tol))
+          return false;
+      }
+      // If so, check base class.
+      return Base::equals(p, tol);
+    } else {
+      return false;
+    }
   }

   /// Compute reprojection errors [h(x)-z] = [cameras.project(p)-z] and
-  /// derivatives
+  /// derivatives. This is the error before the noise model is applied.
   template <class POINT>
   Vector unwhitenedError(
       const Cameras& cameras, const POINT& point,
       boost::optional<typename Cameras::FBlocks&> Fs = boost::none,  //
       boost::optional<Matrix&> E = boost::none) const {
-    Vector ue = cameras.reprojectionError(point, measured_, Fs, E);
+    // Reproject, with optional derivatives.
+    Vector error = cameras.reprojectionError(point, measured_, Fs, E);
+
+    // Apply chain rule if body_P_sensor_ is given.
     if (body_P_sensor_ && Fs) {
       const Pose3 sensor_P_body = body_P_sensor_->inverse();
       constexpr int camera_dim = traits<CAMERA>::dimension;
@@ -224,52 +226,60 @@ protected:
         Fs->at(i) = Fs->at(i) * J;
       }
     }
-    correctForMissingMeasurements(cameras, ue, Fs, E);
-    return ue;
+
+    // Correct the Jacobians in case some measurements are missing.
+    correctForMissingMeasurements(cameras, error, Fs, E);
+
+    return error;
   }

   /**
-   * This corrects the Jacobians for the case in which some pixel measurement is missing (nan)
-   * In practice, this does not do anything in the monocular case, but it is implemented in the stereo version
+   * This corrects the Jacobians for the case in which some 2D measurement is
+   * missing (nan). In practice, this does not do anything in the monocular
+   * case, but it is implemented in the stereo version.
    */
-  virtual void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
-      boost::optional<Matrix&> E = boost::none) const {}
+  virtual void correctForMissingMeasurements(
+      const Cameras& cameras, Vector& ue,
+      boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
+      boost::optional<Matrix&> E = boost::none) const {}

   /**
-   * Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) - z]
-   * Noise model applied
+   * Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) -
+   * z], with the noise model applied.
    */
   template<class POINT>
   Vector whitenedError(const Cameras& cameras, const POINT& point) const {
-    Vector e = cameras.reprojectionError(point, measured_);
+    Vector error = cameras.reprojectionError(point, measured_);
     if (noiseModel_)
-      noiseModel_->whitenInPlace(e);
-    return e;
+      noiseModel_->whitenInPlace(error);
+    return error;
   }

-  /** Calculate the error of the factor.
-   * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian.
-   * In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model
-   * to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5.
-   * Will be used in "error(Values)" function required by NonlinearFactor base class
+  /**
+   * Calculate the error of the factor.
+   * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of
+   * Gaussian. In this class, we take the raw prediction error \f$ h(x)-z \f$,
+   * ask the noise model to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and
+   * then multiply by 0.5. Will be used in "error(Values)" function required by
+   * NonlinearFactor base class
    */
   template<class POINT>
   double totalReprojectionError(const Cameras& cameras,
       const POINT& point) const {
-    Vector e = whitenedError(cameras, point);
-    return 0.5 * e.dot(e);
+    Vector error = whitenedError(cameras, point);
+    return 0.5 * error.dot(error);
   }

-  /// Computes Point Covariance P from E
-  static Matrix PointCov(Matrix& E) {
+  /// Computes Point Covariance P from the "point Jacobian" E.
+  static Matrix PointCov(const Matrix& E) {
     return (E.transpose() * E).inverse();
   }

   /**
    * Compute F, E, and b (called below in both vanilla and SVD versions), where
-   * F is a vector of derivatives wrpt the cameras, and E the stacked derivatives
-   * with respect to the point. The value of cameras/point are passed as parameters.
-   * TODO: Kill this obsolete method
+   * F is a vector of derivatives wrpt the cameras, and E the stacked
+   * derivatives with respect to the point. The value of cameras/point are
+   * passed as parameters.
    */
   template<class POINT>
   void computeJacobians(FBlocks& Fs, Matrix& E, Vector& b,
@@ -281,7 +291,11 @@ protected:
     b = -unwhitenedError(cameras, point, Fs, E);
   }

-  /// SVD version
+  /**
+   * SVD version that produces smaller Jacobian matrices by doing an SVD
+   * decomposition on E, and returning the left null-space of E.
+   * See JacobianFactorSVD for more documentation.
+   * */
   template<class POINT>
   void computeJacobiansSVD(FBlocks& Fs, Matrix& Enull,
       Vector& b, const Cameras& cameras, const POINT& point) const {
@@ -291,14 +305,14 @@ protected:

     static const int N = FixedDimension<POINT>::value;  // 2 (Unit3) or 3 (Point3)

-    // Do SVD on A
+    // Do SVD on A.
     Eigen::JacobiSVD<Matrix> svd(E, Eigen::ComputeFullU);
     Vector s = svd.singularValues();
     size_t m = this->keys_.size();
     Enull = svd.matrixU().block(0, N, ZDim * m, ZDim * m - N);  // last ZDim*m-N columns
   }

-  /// Linearize to a Hessianfactor
+  /// Linearize to a Hessianfactor.
+  // TODO(dellaert): Not used/tested anywhere and not properly whitened.
   boost::shared_ptr<RegularHessianFactor<Dim> > createHessianFactor(
       const Cameras& cameras, const Point3& point, const double lambda = 0.0,
       bool diagonalDamping = false) const {
@@ -351,9 +365,7 @@ protected:
         P, b);
   }

-  /**
-   * Return Jacobians as JacobianFactorQ
-   */
+  /// Return Jacobians as JacobianFactorQ.
   boost::shared_ptr<JacobianFactorQ<Dim, ZDim> > createJacobianQFactor(
       const Cameras& cameras, const Point3& point, double lambda = 0.0,
       bool diagonalDamping = false) const {
@@ -368,8 +380,8 @@ protected:
   }

   /**
-   * Return Jacobians as JacobianFactorSVD
-   * TODO lambda is currently ignored
+   * Return Jacobians as JacobianFactorSVD.
+   * TODO(dellaert): lambda is currently ignored
    */
   boost::shared_ptr<JacobianFactor> createJacobianSVDFactor(
       const Cameras& cameras, const Point3& point, double lambda = 0.0) const {
@@ -393,7 +405,7 @@ protected:
       F.block<ZDim, Dim>(ZDim * i, Dim * i) = Fs.at(i);
   }

   // Return sensor pose.
   Pose3 body_P_sensor() const{
     if(body_P_sensor_)
       return *body_P_sensor_;
@@ -61,15 +61,17 @@ protected:
   /// @name Caching triangulation
   /// @{
   mutable TriangulationResult result_;  ///< result from triangulateSafe
-  mutable std::vector<Pose3, Eigen::aligned_allocator<Pose3> > cameraPosesTriangulation_; ///< current triangulation poses
+  mutable std::vector<Pose3, Eigen::aligned_allocator<Pose3> >
+      cameraPosesTriangulation_;  ///< current triangulation poses
   /// @}

-public:
+ public:

   /// shorthand for a smart pointer to a factor
   typedef boost::shared_ptr<This> shared_ptr;

   /// shorthand for a set of cameras
   typedef CAMERA Camera;
   typedef CameraSet<CAMERA> Cameras;

   /**
@@ -116,21 +118,31 @@ public:
         && Base::equals(p, tol);
   }

-  /// Check if the new linearization point is the same as the one used for previous triangulation
+  /**
+   * @brief Check if the new linearization point is the same as the one used for
+   * previous triangulation.
+   *
+   * @param cameras
+   * @return true if we need to re-triangulate.
+   */
   bool decideIfTriangulate(const Cameras& cameras) const {
-    // several calls to linearize will be done from the same linearization point, hence it is not needed to re-triangulate
-    // Note that this is not yet "selecting linearization", that will come later, and we only check if the
-    // current linearization is the "same" (up to tolerance) w.r.t. the last time we triangulated the point
+    // Several calls to linearize will be done from the same linearization
+    // point, hence it is not needed to re-triangulate. Note that this is not
+    // yet "selecting linearization", that will come later, and we only check if
+    // the current linearization is the "same" (up to tolerance) w.r.t. the last
+    // time we triangulated the point.

     size_t m = cameras.size();

     bool retriangulate = false;

-    // if we do not have a previous linearization point or the new linearization point includes more poses
+    // Definitely true if we do not have a previous linearization point or the
+    // new linearization point includes more poses.
     if (cameraPosesTriangulation_.empty()
         || cameras.size() != cameraPosesTriangulation_.size())
       retriangulate = true;

+    // Otherwise, check poses against cache.
     if (!retriangulate) {
       for (size_t i = 0; i < cameras.size(); i++) {
         if (!cameras[i].pose().equals(cameraPosesTriangulation_[i],
@@ -141,7 +153,8 @@ public:
       }
     }

-    if (retriangulate) { // we store the current poses used for triangulation
+    // Store the current poses used for triangulation if we will re-triangulate.
+    if (retriangulate) {
       cameraPosesTriangulation_.clear();
       cameraPosesTriangulation_.reserve(m);
       for (size_t i = 0; i < m; i++)
@@ -149,10 +162,15 @@ public:
         cameraPosesTriangulation_.push_back(cameras[i].pose());
     }

-    return retriangulate; // if we arrive to this point all poses are the same and we don't need re-triangulation
+    return retriangulate;
   }

-  /// triangulateSafe
+  /**
+   * @brief Call gtsam::triangulateSafe iff we need to re-triangulate.
+   *
+   * @param cameras
+   * @return TriangulationResult
+   */
   TriangulationResult triangulateSafe(const Cameras& cameras) const {

     size_t m = cameras.size();
@@ -166,17 +184,21 @@ public:
     return result_;
   }

-  /// triangulate
+  /**
+   * @brief Possibly re-triangulate before calculating Jacobians.
+   *
+   * @param cameras
+   * @return true if we could safely triangulate
+   */
   bool triangulateForLinearize(const Cameras& cameras) const {
     triangulateSafe(cameras);  // imperative, might reset result_
     return bool(result_);
   }

-  /// linearize returns a Hessianfactor that is an approximation of error(p)
+  /// Create a Hessianfactor that is an approximation of error(p).
   boost::shared_ptr<RegularHessianFactor<Base::Dim> > createHessianFactor(
-      const Cameras& cameras, const double lambda = 0.0, bool diagonalDamping =
-          false) const {
-
+      const Cameras& cameras, const double lambda = 0.0,
+      bool diagonalDamping = false) const {
     size_t numKeys = this->keys_.size();
     // Create structures for Hessian Factors
     KeyVector js;
@@ -184,39 +206,38 @@ public:
     std::vector<Vector> gs(numKeys);

     if (this->measured_.size() != cameras.size())
-      throw std::runtime_error("SmartProjectionHessianFactor: this->measured_"
-                               ".size() inconsistent with input");
+      throw std::runtime_error(
+          "SmartProjectionHessianFactor: this->measured_"
+          ".size() inconsistent with input");

     triangulateSafe(cameras);

     if (params_.degeneracyMode == ZERO_ON_DEGENERACY && !result_) {
       // failed: return "empty" Hessian
-      for(Matrix& m: Gs)
-        m = Matrix::Zero(Base::Dim, Base::Dim);
-      for(Vector& v: gs)
-        v = Vector::Zero(Base::Dim);
+      for (Matrix& m : Gs) m = Matrix::Zero(Base::Dim, Base::Dim);
+      for (Vector& v : gs) v = Vector::Zero(Base::Dim);
       return boost::make_shared<RegularHessianFactor<Base::Dim> >(this->keys_,
-          Gs, gs, 0.0);
+                                                                  Gs, gs, 0.0);
     }

     // Jacobian could be 3D Point3 OR 2D Unit3, difference is E.cols().
-    std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> > Fblocks;
+    typename Base::FBlocks Fs;
     Matrix E;
     Vector b;
-    computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras);
+    computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);

     // Whiten using noise model
-    Base::whitenJacobians(Fblocks, E, b);
+    Base::whitenJacobians(Fs, E, b);

     // build augmented hessian
-    SymmetricBlockMatrix augmentedHessian = //
-        Cameras::SchurComplement(Fblocks, E, b, lambda, diagonalDamping);
+    SymmetricBlockMatrix augmentedHessian =  //
+        Cameras::SchurComplement(Fs, E, b, lambda, diagonalDamping);

-    return boost::make_shared<RegularHessianFactor<Base::Dim> >(this->keys_,
-        augmentedHessian);
+    return boost::make_shared<RegularHessianFactor<Base::Dim> >(
+        this->keys_, augmentedHessian);
   }

-  // create factor
+  // Create RegularImplicitSchurFactor factor.
   boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > createRegularImplicitSchurFactor(
       const Cameras& cameras, double lambda) const {
     if (triangulateForLinearize(cameras))
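For reference (an annotation, not part of the diff): with whitened Jacobians, `Cameras::SchurComplement` builds the reduced camera system obtained by eliminating the point increment `dp` from `|F*dX + E*dp - b|^2`, using the point covariance `P = inv(E'E)` kept by the factor:

```latex
H_{\mathrm{cam}} = F^{\top}F - F^{\top}E\,P\,E^{\top}F,
\qquad
g_{\mathrm{cam}} = F^{\top}b - F^{\top}E\,P\,E^{\top}b
```

which is the same quantity RegularImplicitSchurFactor applies implicitly, without ever forming the full matrices.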
@@ -226,7 +247,7 @@ public:
       return boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> >();
   }

-  /// create factor
+  /// Create JacobianFactorQ factor.
   boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor(
       const Cameras& cameras, double lambda) const {
     if (triangulateForLinearize(cameras))
@@ -236,13 +257,13 @@ public:
       return boost::make_shared<JacobianFactorQ<Base::Dim, 2> >(this->keys_);
   }

-  /// Create a factor, takes values
+  /// Create JacobianFactorQ factor, takes values.
   boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor(
       const Values& values, double lambda) const {
     return createJacobianQFactor(this->cameras(values), lambda);
   }

-  /// different (faster) way to compute Jacobian factor
+  /// Different (faster) way to compute a JacobianFactorSVD factor.
   boost::shared_ptr<JacobianFactor> createJacobianSVDFactor(
       const Cameras& cameras, double lambda) const {
     if (triangulateForLinearize(cameras))
@@ -252,19 +273,19 @@ public:
       return boost::make_shared<JacobianFactorSVD<Base::Dim, 2> >(this->keys_);
   }

-  /// linearize to a Hessianfactor
+  /// Linearize to a Hessianfactor.
   virtual boost::shared_ptr<RegularHessianFactor<Base::Dim> > linearizeToHessian(
       const Values& values, double lambda = 0.0) const {
     return createHessianFactor(this->cameras(values), lambda);
   }

-  /// linearize to an Implicit Schur factor
+  /// Linearize to an Implicit Schur factor.
   virtual boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > linearizeToImplicit(
       const Values& values, double lambda = 0.0) const {
     return createRegularImplicitSchurFactor(this->cameras(values), lambda);
   }

-  /// linearize to a JacobianfactorQ
+  /// Linearize to a JacobianfactorQ.
   virtual boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > linearizeToJacobian(
       const Values& values, double lambda = 0.0) const {
     return createJacobianQFactor(this->cameras(values), lambda);
@@ -334,7 +355,7 @@ public:
   /// Assumes the point has been computed
   /// Note E can be 2m*3 or 2m*2, in case point is degenerate
   void computeJacobiansWithTriangulatedPoint(
-      std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& E, Vector& b,
+      typename Base::FBlocks& Fs, Matrix& E, Vector& b,
       const Cameras& cameras) const {

     if (!result_) {
@@ -342,32 +363,32 @@ public:
       // TODO check flag whether we should do this
       Unit3 backProjected = cameras[0].backprojectPointAtInfinity(
           this->measured_.at(0));
-      Base::computeJacobians(Fblocks, E, b, cameras, backProjected);
+      Base::computeJacobians(Fs, E, b, cameras, backProjected);
     } else {
       // valid result: just return Base version
-      Base::computeJacobians(Fblocks, E, b, cameras, *result_);
+      Base::computeJacobians(Fs, E, b, cameras, *result_);
     }
   }

   /// Version that takes values, and creates the point
   bool triangulateAndComputeJacobians(
-      std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& E, Vector& b,
+      typename Base::FBlocks& Fs, Matrix& E, Vector& b,
       const Values& values) const {
     Cameras cameras = this->cameras(values);
     bool nonDegenerate = triangulateForLinearize(cameras);
     if (nonDegenerate)
-      computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras);
+      computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);
     return nonDegenerate;
   }

   /// takes values
   bool triangulateAndComputeJacobiansSVD(
-      std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& Enull, Vector& b,
+      typename Base::FBlocks& Fs, Matrix& Enull, Vector& b,
       const Values& values) const {
     Cameras cameras = this->cameras(values);
     bool nonDegenerate = triangulateForLinearize(cameras);
     if (nonDegenerate)
-      Base::computeJacobiansSVD(Fblocks, Enull, b, cameras, *result_);
+      Base::computeJacobiansSVD(Fs, Enull, b, cameras, *result_);
     return nonDegenerate;
   }
@@ -0,0 +1,370 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file   SmartProjectionRigFactor.h
 * @brief  Smart factor on poses, assuming camera calibration is fixed.
 *         Same as SmartProjectionPoseFactor, except:
 *         - it is templated on CAMERA (i.e., it allows cameras beyond pinhole)
 *         - it admits a different calibration for each measurement (i.e., it
 *           can model a multi-camera rig system)
 *         - it allows multiple observations from the same pose/key (again, to
 *           model a multi-camera system)
 * @author Luca Carlone
 * @author Frank Dellaert
 */

#pragma once

#include <gtsam/slam/SmartProjectionFactor.h>

namespace gtsam {
/**
 *
 * @addtogroup SLAM
 *
 * If you are using the factor, please cite:
 * L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating
 * conditionally independent sets in factor graphs: a unifying perspective based
 * on smart factors, Int. Conf. on Robotics and Automation (ICRA), 2014.
 */

/**
 * This factor assumes that camera calibration is fixed (but each measurement
 * can be taken by a different camera in the rig, hence can have a different
 * extrinsic and intrinsic calibration). The factor only constrains poses
 * (variable dimension is 6 for each pose). This factor requires that values
 * contains the involved poses (Pose3). If all measurements share the same
 * calibration (i.e., are from the same camera), use SmartProjectionPoseFactor
 * instead! If the calibration should be optimized, as well, use
 * SmartProjectionFactor instead!
 * @addtogroup SLAM
 */
template <class CAMERA>
class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
 private:
  typedef SmartProjectionFactor<CAMERA> Base;
  typedef SmartProjectionRigFactor<CAMERA> This;
  typedef typename CAMERA::CalibrationType CALIBRATION;

  static const int DimPose = 6;  ///< Pose3 dimension
  static const int ZDim = 2;     ///< Measurement dimension

 protected:
  /// vector of keys (one for each observation) with potentially repeated keys
  KeyVector nonUniqueKeys_;

  /// cameras in the rig (fixed poses wrt body and intrinsics, for each camera)
  boost::shared_ptr<typename Base::Cameras> cameraRig_;

  /// vector of camera Ids (one for each observation, in the same order),
  /// identifying which camera took the measurement
  FastVector<size_t> cameraIds_;

 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW

  typedef CAMERA Camera;
  typedef CameraSet<CAMERA> Cameras;

  /// shorthand for a smart pointer to a factor
  typedef boost::shared_ptr<This> shared_ptr;

  /// Default constructor, only for serialization
  SmartProjectionRigFactor() {}

  /**
   * Constructor
   * @param sharedNoiseModel isotropic noise model for the 2D feature
   * measurements
   * @param cameraRig set of cameras (fixed poses wrt body and intrinsics) in
   * the camera rig
   * @param params parameters for the smart projection factors
   */
  SmartProjectionRigFactor(
      const SharedNoiseModel& sharedNoiseModel,
      const boost::shared_ptr<Cameras>& cameraRig,
      const SmartProjectionParams& params = SmartProjectionParams())
      : Base(sharedNoiseModel, params), cameraRig_(cameraRig) {
    // throw exception if configuration is not supported by this factor
    if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY)
      throw std::runtime_error(
          "SmartProjectionRigFactor: "
          "degeneracyMode must be set to ZERO_ON_DEGENERACY");
    if (Base::params_.linearizationMode != gtsam::HESSIAN)
      throw std::runtime_error(
          "SmartProjectionRigFactor: "
          "linearizationMode must be set to HESSIAN");
  }

  /** Virtual destructor */
  ~SmartProjectionRigFactor() override = default;

  /**
   * add a new measurement, corresponding to an observation from pose "poseKey"
   * and taken from the camera in the rig identified by "cameraId"
   * @param measured 2-dimensional location of the projection of a
   * single landmark in a single view (the measurement)
   * @param poseKey key corresponding to the body pose of the camera taking the
   * measurement
   * @param cameraId ID of the camera in the rig taking the measurement
   * (default 0)
   */
  void add(const Point2& measured, const Key& poseKey,
           const size_t& cameraId = 0) {
    // store measurement and key
    this->measured_.push_back(measured);
    this->nonUniqueKeys_.push_back(poseKey);

    // also store keys in the keys_ vector: these keys are assumed to be
    // unique, so we avoid duplicates here
    if (std::find(this->keys_.begin(), this->keys_.end(), poseKey) ==
        this->keys_.end())
      this->keys_.push_back(poseKey);  // add only unique keys

    // store id of the camera taking the measurement
    cameraIds_.push_back(cameraId);
  }
  /**
   * Variant of the previous "add" function in which we include multiple
   * measurements
   * @param measurements vector of the 2m dimensional location of the projection
   * of a single landmark in the m views (the measurements)
   * @param poseKeys keys corresponding to the body poses of the cameras taking
   * the measurements
   * @param cameraIds IDs of the cameras in the rig taking each measurement
   * (same order as the measurements)
   */
  void add(const Point2Vector& measurements, const KeyVector& poseKeys,
           const FastVector<size_t>& cameraIds = FastVector<size_t>()) {
    if (poseKeys.size() != measurements.size() ||
        (poseKeys.size() != cameraIds.size() && cameraIds.size() != 0)) {
      throw std::runtime_error(
          "SmartProjectionRigFactor: "
          "trying to add inconsistent inputs");
    }
    if (cameraIds.size() == 0 && cameraRig_->size() > 1) {
      throw std::runtime_error(
          "SmartProjectionRigFactor: "
          "camera rig includes multiple camera "
          "but add did not input cameraIds");
    }
    for (size_t i = 0; i < measurements.size(); i++) {
      add(measurements[i], poseKeys[i],
          cameraIds.size() == 0 ? 0 : cameraIds[i]);
    }
  }

  /// return (for each observation) the (possibly non unique) keys involved in
  /// the measurements
  const KeyVector& nonUniqueKeys() const { return nonUniqueKeys_; }

  /// return the calibration object
  const boost::shared_ptr<Cameras>& cameraRig() const { return cameraRig_; }

  /// return the camera Ids
  const FastVector<size_t>& cameraIds() const { return cameraIds_; }

  /**
   * print
   * @param s optional string naming the factor
   * @param keyFormatter optional formatter useful for printing Symbols
   */
  void print(
      const std::string& s = "",
      const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
    std::cout << s << "SmartProjectionRigFactor: \n ";
    for (size_t i = 0; i < nonUniqueKeys_.size(); i++) {
      std::cout << "-- Measurement nr " << i << std::endl;
      std::cout << "key: " << keyFormatter(nonUniqueKeys_[i]) << std::endl;
      std::cout << "cameraId: " << cameraIds_[i] << std::endl;
      (*cameraRig_)[cameraIds_[i]].print("camera in rig:\n");
    }
    Base::print("", keyFormatter);
  }

  /// equals
  bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
    const This* e = dynamic_cast<const This*>(&p);
    return e && Base::equals(p, tol) && nonUniqueKeys_ == e->nonUniqueKeys() &&
           cameraRig_->equals(*(e->cameraRig())) &&
           std::equal(cameraIds_.begin(), cameraIds_.end(),
                      e->cameraIds().begin());
  }

  /**
   * Collect all cameras involved in this factor
   * @param values Values structure which must contain body poses corresponding
   * to keys involved in this factor
   * @return vector of cameras
   */
  typename Base::Cameras cameras(const Values& values) const override {
    typename Base::Cameras cameras;
    cameras.reserve(nonUniqueKeys_.size());  // preallocate
    for (size_t i = 0; i < nonUniqueKeys_.size(); i++) {
      const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
      const Pose3 world_P_sensor_i =
          values.at<Pose3>(nonUniqueKeys_[i])  // = world_P_body
          * camera_i.pose();                   // = body_P_cam_i
      cameras.emplace_back(world_P_sensor_i,
                           make_shared<typename CAMERA::CalibrationType>(
                               camera_i.calibration()));
    }
    return cameras;
  }

  /**
   * error calculates the error of the factor.
*/
|
||||
double error(const Values& values) const override {
|
||||
if (this->active(values)) {
|
||||
return this->totalReprojectionError(this->cameras(values));
|
||||
} else { // else of active flag
|
||||
return 0.0;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute jacobian F, E and error vector at a given linearization point
|
||||
* @param values Values structure which must contain camera poses
|
||||
* corresponding to keys involved in this factor
|
||||
* @return Return arguments are the camera jacobians Fs (including the
|
||||
* jacobian with respect to both body poses we interpolate from), the point
|
||||
* Jacobian E, and the error vector b. Note that the jacobians are computed
|
||||
* for a given point.
|
||||
*/
|
||||
void computeJacobiansWithTriangulatedPoint(typename Base::FBlocks& Fs,
|
||||
Matrix& E, Vector& b,
|
||||
const Cameras& cameras) const {
|
||||
if (!this->result_) {
|
||||
throw("computeJacobiansWithTriangulatedPoint");
|
||||
} else { // valid result: compute jacobians
|
||||
b = -cameras.reprojectionError(*this->result_, this->measured_, Fs, E);
|
||||
for (size_t i = 0; i < Fs.size(); i++) {
|
||||
const Pose3& body_P_sensor = (*cameraRig_)[cameraIds_[i]].pose();
|
||||
const Pose3 world_P_body = cameras[i].pose() * body_P_sensor.inverse();
|
||||
Eigen::Matrix<double, DimPose, DimPose> H;
|
||||
world_P_body.compose(body_P_sensor, H);
|
||||
Fs.at(i) = Fs.at(i) * H;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// linearize and return a Hessianfactor that is an approximation of error(p)
|
||||
boost::shared_ptr<RegularHessianFactor<DimPose> > createHessianFactor(
|
||||
const Values& values, const double& lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
// we may have multiple observation sharing the same keys (e.g., 2 cameras
|
||||
// measuring from the same body pose), hence the number of unique keys may
|
||||
// be smaller than nrMeasurements
|
||||
size_t nrUniqueKeys =
|
||||
this->keys_
|
||||
.size(); // note: by construction, keys_ only contains unique keys
|
||||
|
||||
Cameras cameras = this->cameras(values);
|
||||
|
||||
// Create structures for Hessian Factors
|
||||
std::vector<size_t> js;
|
||||
std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
|
||||
std::vector<Vector> gs(nrUniqueKeys);
|
||||
|
||||
if (this->measured_.size() != cameras.size()) // 1 observation per camera
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionRigFactor: "
|
||||
"measured_.size() inconsistent with input");
|
||||
|
||||
// triangulate 3D point at given linearization point
|
||||
this->triangulateSafe(cameras);
|
||||
|
||||
if (!this->result_) { // failed: return "empty/zero" Hessian
|
||||
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {
|
||||
for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
|
||||
for (Vector& v : gs) v = Vector::Zero(DimPose);
|
||||
return boost::make_shared<RegularHessianFactor<DimPose> >(this->keys_,
|
||||
Gs, gs, 0.0);
|
||||
} else {
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionRigFactor: "
|
||||
"only supported degeneracy mode is ZERO_ON_DEGENERACY");
|
||||
}
|
||||
}
|
||||
|
||||
// compute Jacobian given triangulated 3D Point
|
||||
typename Base::FBlocks Fs;
|
||||
Matrix E;
|
||||
Vector b;
|
||||
this->computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);
|
||||
|
||||
// Whiten using noise model
|
||||
this->noiseModel_->WhitenSystem(E, b);
|
||||
for (size_t i = 0; i < Fs.size(); i++) {
|
||||
Fs[i] = this->noiseModel_->Whiten(Fs[i]);
|
||||
}
|
||||
|
||||
const Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping);
|
||||
|
||||
// Build augmented Hessian (with last row/column being the information
|
||||
// vector) Note: we need to get the augumented hessian wrt the unique keys
|
||||
// in key_
|
||||
SymmetricBlockMatrix augmentedHessianUniqueKeys =
|
||||
Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 6, 6>(
|
||||
Fs, E, P, b, nonUniqueKeys_, this->keys_);
|
||||
|
||||
return boost::make_shared<RegularHessianFactor<DimPose> >(
|
||||
this->keys_, augmentedHessianUniqueKeys);
|
||||
}
|
||||
|
||||
/**
|
||||
* Linearize to Gaussian Factor (possibly adding a damping factor Lambda for
|
||||
* LM)
|
||||
* @param values Values structure which must contain camera poses and
|
||||
* extrinsic pose for this factor
|
||||
* @return a Gaussian factor
|
||||
*/
|
||||
boost::shared_ptr<GaussianFactor> linearizeDamped(
|
||||
const Values& values, const double& lambda = 0.0) const {
|
||||
// depending on flag set on construction we may linearize to different
|
||||
// linear factors
|
||||
switch (this->params_.linearizationMode) {
|
||||
case HESSIAN:
|
||||
return this->createHessianFactor(values, lambda);
|
||||
default:
|
||||
throw std::runtime_error(
|
||||
"SmartProjectionRigFactor: unknown linearization mode");
|
||||
}
|
||||
}
|
||||
|
||||
/// linearize
|
||||
boost::shared_ptr<GaussianFactor> linearize(
|
||||
const Values& values) const override {
|
||||
return this->linearizeDamped(values);
|
||||
}
|
||||
|
||||
private:
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template <class ARCHIVE>
|
||||
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
|
||||
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
// ar& BOOST_SERIALIZATION_NVP(nonUniqueKeys_);
|
||||
// ar& BOOST_SERIALIZATION_NVP(cameraRig_);
|
||||
// ar& BOOST_SERIALIZATION_NVP(cameraIds_);
|
||||
}
|
||||
};
|
||||
// end of class declaration
|
||||
|
||||
/// traits
|
||||
template <class CAMERA>
|
||||
struct traits<SmartProjectionRigFactor<CAMERA> >
|
||||
: public Testable<SmartProjectionRigFactor<CAMERA> > {};
|
||||
|
||||
} // namespace gtsam
|
|
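For context, a minimal usage sketch of the factor above follows; the keys, calibration values, and measurements are made up for illustration, and a single-camera rig with an identity body-to-camera pose is assumed:

  // Build a one-camera rig (hypothetical calibration, identity extrinsics).
  typedef gtsam::PinholePose<gtsam::Cal3_S2> Camera;
  auto K = boost::make_shared<gtsam::Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto rig = boost::make_shared<gtsam::CameraSet<Camera> >();
  rig->push_back(Camera(gtsam::Pose3(), K));  // body_P_cam = identity

  // The factor requires HESSIAN linearization and ZERO_ON_DEGENERACY.
  gtsam::SmartProjectionParams params(gtsam::HESSIAN,
                                      gtsam::ZERO_ON_DEGENERACY);
  auto noise = gtsam::noiseModel::Isotropic::Sigma(2, 1.0);
  auto factor = boost::make_shared<
      gtsam::SmartProjectionRigFactor<Camera> >(noise, rig, params);

  // One landmark observed from two body poses, both through camera 0.
  factor->add(gtsam::Point2(320.0, 240.0), gtsam::Symbol('x', 0), 0);
  factor->add(gtsam::Point2(310.0, 240.0), gtsam::Symbol('x', 1), 0);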
@ -22,6 +22,7 @@
#include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Cal3Bundler.h>
#include "../SmartProjectionRigFactor.h"

using namespace std;
using namespace gtsam;
@ -68,7 +69,9 @@ SmartProjectionParams params;
// default Cal3_S2 poses
namespace vanillaPose {
typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h));
Camera level_camera(level_pose, sharedK);
Camera level_camera_right(pose_right, sharedK);
@ -81,7 +84,9 @@ Camera cam3(pose_above, sharedK);
// default Cal3_S2 poses
namespace vanillaPose2 {
typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static Cal3_S2::shared_ptr sharedK2(new Cal3_S2(1500, 1200, 0, 640, 480));
Camera level_camera(level_pose, sharedK2);
Camera level_camera_right(pose_right, sharedK2);
@ -94,6 +99,7 @@ Camera cam3(pose_above, sharedK2);
// Cal3Bundler cameras
namespace bundler {
typedef PinholeCamera<Cal3Bundler> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionFactor<Camera> SmartFactor;
static Cal3Bundler K(500, 1e-3, 1e-3, 0, 0);
Camera level_camera(level_pose, K);
@ -110,7 +116,9 @@ typedef GeneralSFMFactor<Camera, Point3> SFMFactor;
// Cal3Bundler poses
namespace bundlerPose {
typedef PinholePose<Cal3Bundler> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3Bundler> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static boost::shared_ptr<Cal3Bundler> sharedBundlerK(
    new Cal3Bundler(500, 1e-3, 1e-3, 1000, 2000));
Camera level_camera(level_pose, sharedBundlerK);
@ -127,13 +127,13 @@ TEST(SmartFactorBase, Stereo) {
}

/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

TEST(SmartFactorBase, serialize) {
  using namespace gtsam::serializationTestHelpers;
@ -811,13 +811,13 @@ TEST(SmartProjectionFactor, implicitJacobianFactor ) {
}

/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

TEST(SmartProjectionFactor, serialize) {
  using namespace vanilla;
@ -1333,13 +1333,13 @@ TEST( SmartProjectionPoseFactor, Cal3BundlerRotationOnly ) {
}

/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

TEST(SmartProjectionPoseFactor, serialize) {
  using namespace vanillaPose;
File diff suppressed because it is too large
@ -5,107 +5,105 @@
 * @author Frank Dellaert
 */

#include <gtsam/base/Testable.h>
#include <gtsam_unstable/discrete/AllDiff.h>
#include <gtsam_unstable/discrete/Domain.h>

#include <boost/make_shared.hpp>

namespace gtsam {

/* ************************************************************************* */
AllDiff::AllDiff(const DiscreteKeys& dkeys) : Constraint(dkeys.indices()) {
  for (const DiscreteKey& dkey : dkeys) cardinalities_.insert(dkey);
}

/* ************************************************************************* */
void AllDiff::print(const std::string& s, const KeyFormatter& formatter) const {
  std::cout << s << "AllDiff on ";
  for (Key dkey : keys_) std::cout << formatter(dkey) << " ";
  std::cout << std::endl;
}

/* ************************************************************************* */
double AllDiff::operator()(const Values& values) const {
  std::set<size_t> taken;  // record values taken by keys
  for (Key dkey : keys_) {
    size_t value = values.at(dkey);      // get the value for that key
    if (taken.count(value)) return 0.0;  // check if value already taken
    taken.insert(value);  // if not, record it as taken and keep checking
  }
  return 1.0;
}

/* ************************************************************************* */
DecisionTreeFactor AllDiff::toDecisionTreeFactor() const {
  // We will do this by converting the allDiff into many BinaryAllDiff
  // constraints
  DecisionTreeFactor converted;
  size_t nrKeys = keys_.size();
  for (size_t i1 = 0; i1 < nrKeys; i1++)
    for (size_t i2 = i1 + 1; i2 < nrKeys; i2++) {
      BinaryAllDiff binary12(discreteKey(i1), discreteKey(i2));
      converted = converted * binary12.toDecisionTreeFactor();
    }
  return converted;
}

/* ************************************************************************* */
DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const {
  // TODO: can we do this more efficiently?
  return toDecisionTreeFactor() * f;
}

/* ************************************************************************* */
bool AllDiff::ensureArcConsistency(size_t j,
                                   std::vector<Domain>& domains) const {
  // Though strictly not part of allDiff, we check for
  // a value in domains[j] that does not occur in any other connected domain.
  // If found, we make this a singleton...
  // TODO: make a new constraint where this really is true
  Domain& Dj = domains[j];
  if (Dj.checkAllDiff(keys_, domains)) return true;

  // Check all other domains for singletons and erase corresponding values.
  // This is the same as arc-consistency on the equivalent binary constraints
  bool changed = false;
  for (Key k : keys_)
    if (k != j) {
      const Domain& Dk = domains[k];
      if (Dk.isSingleton()) {  // check if singleton
        size_t value = Dk.firstValue();
        if (Dj.contains(value)) {
          Dj.erase(value);  // erase value if true
          changed = true;
        }
      }
    }
  return changed;
}

/* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const {
  DiscreteKeys newKeys;
  // loop over keys and add them only if they do not appear in values
  for (Key k : keys_)
    if (values.find(k) == values.end()) {
      newKeys.push_back(DiscreteKey(k, cardinalities_.at(k)));
    }
  return boost::make_shared<AllDiff>(newKeys);
}

/* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply(
    const std::vector<Domain>& domains) const {
  DiscreteFactor::Values known;
  for (Key k : keys_) {
    const Domain& Dk = domains[k];
    if (Dk.isSingleton()) known[k] = Dk.firstValue();
  }
  return partiallyApply(known);
}

/* ************************************************************************* */
}  // namespace gtsam
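Since operator() and the pairwise decomposition above are the crux of this file, a minimal sketch may help; the keys and cardinalities below are made up for illustration:

  // Three variables over {0,1,2} that must all take different values.
  gtsam::DiscreteKeys dkeys;
  dkeys.push_back(gtsam::DiscreteKey(0, 3));
  dkeys.push_back(gtsam::DiscreteKey(1, 3));
  dkeys.push_back(gtsam::DiscreteKey(2, 3));
  gtsam::AllDiff allDiff(dkeys);

  gtsam::DiscreteFactor::Values values;
  values[0] = 0; values[1] = 1; values[2] = 2;
  // all different: allDiff(values) == 1.0
  values[2] = 1;
  // repeated value: allDiff(values) == 0.0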
@ -7,71 +7,71 @@
#pragma once

#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/BinaryAllDiff.h>

namespace gtsam {

/**
 * General AllDiff constraint
 * Returns 1 if values for all keys are different, 0 otherwise.
 * DiscreteFactors are all awkward in that they have to store two types of
 * keys: for each variable we have a Key and an Index. In this factor, we
 * keep the Indices locally, and the Indices are stored in IndexFactor.
 */
class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
  std::map<Key, size_t> cardinalities_;

  DiscreteKey discreteKey(size_t i) const {
    Key j = keys_[i];
    return DiscreteKey(j, cardinalities_.at(j));
  }

 public:
  /// Constructor
  AllDiff(const DiscreteKeys& dkeys);

  // print
  void print(const std::string& s = "",
             const KeyFormatter& formatter = DefaultKeyFormatter) const override;

  /// equals
  bool equals(const DiscreteFactor& other, double tol) const override {
    if (!dynamic_cast<const AllDiff*>(&other))
      return false;
    else {
      const AllDiff& f(static_cast<const AllDiff&>(other));
      return cardinalities_.size() == f.cardinalities_.size() &&
             std::equal(cardinalities_.begin(), cardinalities_.end(),
                        f.cardinalities_.begin());
    }
  }

  /// Calculate value = expensive!
  double operator()(const Values& values) const override;

  /// Convert into a decision tree, can be *very* expensive!
  DecisionTreeFactor toDecisionTreeFactor() const override;

  /// Multiply into a decision tree
  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override;

  /*
   * Ensure Arc-consistency
   * Arc-consistency involves creating binaryAllDiff constraints,
   * in which case the combinatorial hyper-arc explosion disappears.
   * @param j domain to be checked
   * @param domains all other domains
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override;

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values&) const override;

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>&) const override;
};

}  // namespace gtsam
@ -7,94 +7,93 @@
#pragma once

#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam_unstable/discrete/Constraint.h>
#include <gtsam_unstable/discrete/Domain.h>

namespace gtsam {

/**
 * Binary AllDiff constraint
 * Returns 1 if values for two keys are different, 0 otherwise.
 * DiscreteFactors are all awkward in that they have to store two types of
 * keys: for each variable we have a Key and an Index. In this factor, we
 * keep the Indices locally, and the Indices are stored in IndexFactor.
 */
class BinaryAllDiff : public Constraint {
  size_t cardinality0_, cardinality1_;  /// cardinality

 public:
  /// Constructor
  BinaryAllDiff(const DiscreteKey& key1, const DiscreteKey& key2)
      : Constraint(key1.first, key2.first),
        cardinality0_(key1.second),
        cardinality1_(key2.second) {}

  // print
  void print(
      const std::string& s = "",
      const KeyFormatter& formatter = DefaultKeyFormatter) const override {
    std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and "
              << formatter(keys_[1]) << std::endl;
  }

  /// equals
  bool equals(const DiscreteFactor& other, double tol) const override {
    if (!dynamic_cast<const BinaryAllDiff*>(&other))
      return false;
    else {
      const BinaryAllDiff& f(static_cast<const BinaryAllDiff&>(other));
      return (cardinality0_ == f.cardinality0_) &&
             (cardinality1_ == f.cardinality1_);
    }
  }

  /// Calculate value
  double operator()(const Values& values) const override {
    return (double)(values.at(keys_[0]) != values.at(keys_[1]));
  }

  /// Convert into a decision tree
  DecisionTreeFactor toDecisionTreeFactor() const override {
    DiscreteKeys keys;
    keys.push_back(DiscreteKey(keys_[0], cardinality0_));
    keys.push_back(DiscreteKey(keys_[1], cardinality1_));
    std::vector<double> table;
    for (size_t i1 = 0; i1 < cardinality0_; i1++)
      for (size_t i2 = 0; i2 < cardinality1_; i2++) table.push_back(i1 != i2);
    DecisionTreeFactor converted(keys, table);
    return converted;
  }

  /// Multiply into a decision tree
  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override {
    // TODO: can we do this more efficiently?
    return toDecisionTreeFactor() * f;
  }

  /*
   * Ensure Arc-consistency
   * @param j domain to be checked
   * @param domains all other domains
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override {
    // throw std::runtime_error(
    //     "BinaryAllDiff::ensureArcConsistency not implemented");
    return false;
  }

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values&) const override {
    throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented");
  }

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>&) const override {
    throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented");
  }
};

}  // namespace gtsam
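As a sanity check on toDecisionTreeFactor() above, here is a sketch for two binary variables, where the constraint reduces to XOR; the keys are made up, and the string-table constructor is the usual DecisionTreeFactor spec:

  gtsam::DiscreteKey A(0, 2), B(1, 2);
  gtsam::BinaryAllDiff constraint(A, B);
  // The 2x2 table built above is 0 on the agreeing assignments (0,0) and
  // (1,1), and 1 on the mixed assignments (0,1) and (1,0):
  gtsam::DecisionTreeFactor expected(A & B, "0 1 1 0");
  // constraint.toDecisionTreeFactor().equals(expected, 1e-9) == true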
@ -5,99 +5,104 @@
 * @author Frank Dellaert
 */

#include <gtsam/base/Testable.h>
#include <gtsam_unstable/discrete/CSP.h>
#include <gtsam_unstable/discrete/Domain.h>

using namespace std;

namespace gtsam {

/// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment() const {
  DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential();
  sharedValues mpe = chordal->optimize();
  return mpe;
}

/// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment(const Ordering& ordering) const {
  DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering);
  sharedValues mpe = chordal->optimize();
  return mpe;
}

void CSP::runArcConsistency(size_t cardinality, size_t nrIterations,
                            bool print) const {
  // Create VariableIndex
  VariableIndex index(*this);
  // index.print();

  size_t n = index.size();

  // Initialize domains
  std::vector<Domain> domains;
  for (size_t j = 0; j < n; j++)
    domains.push_back(Domain(DiscreteKey(j, cardinality)));

  // Create array of flags indicating a domain changed or not
  std::vector<bool> changed(n);

  // iterate nrIterations over entire grid
  for (size_t it = 0; it < nrIterations; it++) {
    bool anyChange = false;
    // iterate over all cells
    for (size_t v = 0; v < n; v++) {
      // keep track of which domains changed
      changed[v] = false;
      // loop over all factors/constraints for variable v
      const FactorIndices& factors = index[v];
      for (size_t f : factors) {
        // if not already a singleton
        if (!domains[v].isSingleton()) {
          // get the constraint and call its ensureArcConsistency method
          Constraint::shared_ptr constraint =
              boost::dynamic_pointer_cast<Constraint>((*this)[f]);
          if (!constraint)
            throw runtime_error("CSP:runArcConsistency: non-constraint factor");
          changed[v] =
              constraint->ensureArcConsistency(v, domains) || changed[v];
        }
      }  // f
      if (changed[v]) anyChange = true;
    }  // v
    if (!anyChange) break;
    // TODO: Sudoku specific hack
    if (print) {
      if (cardinality == 9 && n == 81) {
        for (size_t i = 0, v = 0; i < (size_t)std::sqrt((double)n); i++) {
          for (size_t j = 0; j < (size_t)std::sqrt((double)n); j++, v++) {
            if (changed[v]) cout << "*";
            domains[v].print();
            cout << "\t";
          }  // j
          cout << endl;
        }  // i
      } else {
        for (size_t v = 0; v < n; v++) {
          if (changed[v]) cout << "*";
          domains[v].print();
          cout << "\t";
        }  // v
      }
      cout << endl;
    }  // print
  }  // it

#ifndef INPROGRESS
  // Now create new problem with all singleton variables removed
  // We do this by simplifying all factors using partial application
  // TODO: create a new ordering as we go, to ensure a connected graph
  // KeyOrdering ordering;
  // vector<Index> dkeys;
  for (const DiscreteFactor::shared_ptr& f : factors_) {
    Constraint::shared_ptr constraint =
        boost::dynamic_pointer_cast<Constraint>(f);
    if (!constraint)
      throw runtime_error("CSP:runArcConsistency: non-constraint factor");
    Constraint::shared_ptr reduced = constraint->partiallyApply(domains);
    if (print) reduced->print();
  }
#endif
}
}  // namespace gtsam
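To make the arc-consistency loop above concrete, here is a minimal sketch in the style of the map-coloring tests; the territories and the pinned color are made up for illustration:

  // Three regions with three available colors; neighbors must differ.
  gtsam::DiscreteKey ID(0, 3), UT(1, 3), AZ(2, 3);
  gtsam::CSP csp;
  csp.addSingleValue(AZ, 2);  // pin one region to color 2
  csp.addAllDiff(ID, UT);     // pairwise binary AllDiff constraints
  csp.addAllDiff(UT, AZ);
  csp.addAllDiff(ID, AZ);
  gtsam::CSP::sharedValues best = csp.optimalAssignment();  // exact solve
  csp.runArcConsistency(3 /*cardinality*/, 10 /*iterations*/, true /*print*/);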
@ -7,84 +7,81 @@
#pragma once

#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam_unstable/discrete/AllDiff.h>
#include <gtsam_unstable/discrete/SingleValue.h>

namespace gtsam {

/**
 * Constraint Satisfaction Problem class
 * A specialization of a DiscreteFactorGraph.
 * It knows about CSP-specific constraints and algorithms
 */
class GTSAM_UNSTABLE_EXPORT CSP : public DiscreteFactorGraph {
 public:
  /** A map from keys to values */
  typedef KeyVector Indices;
  typedef Assignment<Key> Values;
  typedef boost::shared_ptr<Values> sharedValues;

 public:
  // /// Constructor
  // CSP() {
  // }

  /// Add a unary constraint, allowing only a single value
  void addSingleValue(const DiscreteKey& dkey, size_t value) {
    boost::shared_ptr<SingleValue> factor(new SingleValue(dkey, value));
    push_back(factor);
  }

  /// Add a binary AllDiff constraint
  void addAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) {
    boost::shared_ptr<BinaryAllDiff> factor(new BinaryAllDiff(key1, key2));
    push_back(factor);
  }

  /// Add a general AllDiff constraint
  void addAllDiff(const DiscreteKeys& dkeys) {
    boost::shared_ptr<AllDiff> factor(new AllDiff(dkeys));
    push_back(factor);
  }

  // /** return product of all factors as a single factor */
  // DecisionTreeFactor product() const {
  //   DecisionTreeFactor result;
  //   for(const sharedFactor& factor: *this)
  //     if (factor) result = (*factor) * result;
  //   return result;
  // }

  /// Find the best total assignment - can be expensive
  sharedValues optimalAssignment() const;

  /// Find the best total assignment - can be expensive
  sharedValues optimalAssignment(const Ordering& ordering) const;

  // /*
  //  * Perform loopy belief propagation
  //  * True belief propagation would check for each value in domain
  //  * whether any satisfying separator assignment can be found.
  //  * This corresponds to hyper-arc consistency in CSP speak.
  //  * This can be done by creating a mini-factor graph and search.
  //  * For a nine-by-nine Sudoku, the search tree will be 8+6+6=20 levels deep.
  //  * It will be very expensive to exclude values that way.
  //  */
  // void applyBeliefPropagation(size_t nrIterations = 10) const;

  /*
   * Apply arc-consistency ~ Approximate loopy belief propagation
   * We need to give the domains to a constraint, and it returns
   * a domain whose values don't conflict in the arc-consistency way.
   * TODO: should get cardinality from Indices
   */
  void runArcConsistency(size_t cardinality, size_t nrIterations = 10,
                         bool print = false) const;
};  // CSP

}  // namespace gtsam
@ -17,77 +17,68 @@
#pragma once

#include <gtsam/discrete/DiscreteFactor.h>
#include <gtsam_unstable/dllexport.h>

#include <boost/assign.hpp>

namespace gtsam {

class Domain;

/**
 * Base class for discrete probabilistic factors
 * The most general one is the derived DecisionTreeFactor
 */
class Constraint : public DiscreteFactor {
 public:
  typedef boost::shared_ptr<Constraint> shared_ptr;

 protected:
  /// Construct n-way factor
  Constraint(const KeyVector& js) : DiscreteFactor(js) {}

  /// Construct unary factor
  Constraint(Key j) : DiscreteFactor(boost::assign::cref_list_of<1>(j)) {}

  /// Construct binary factor
  Constraint(Key j1, Key j2)
      : DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) {}

  /// construct from container
  template <class KeyIterator>
  Constraint(KeyIterator beginKey, KeyIterator endKey)
      : DiscreteFactor(beginKey, endKey) {}

 public:
  /// @name Standard Constructors
  /// @{

  /// Default constructor for I/O
  Constraint();

  /// Virtual destructor
  ~Constraint() override {}

  /// @}
  /// @name Standard Interface
  /// @{

  /*
   * Ensure Arc-consistency
   * @param j domain to be checked
   * @param domains all other domains
   */
  virtual bool ensureArcConsistency(size_t j,
                                    std::vector<Domain>& domains) const = 0;

  /// Partially apply known values
  virtual shared_ptr partiallyApply(const Values&) const = 0;

  /// Partially apply known values, domain version
  virtual shared_ptr partiallyApply(const std::vector<Domain>&) const = 0;
  /// @}
};
// DiscreteFactor

}  // namespace gtsam
@ -5,92 +5,89 @@
|
|||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#include <gtsam_unstable/discrete/Domain.h>
|
||||
#include <gtsam/discrete/DecisionTreeFactor.h>
|
||||
#include <gtsam/base/Testable.h>
|
||||
#include <gtsam/discrete/DecisionTreeFactor.h>
|
||||
#include <gtsam_unstable/discrete/Domain.h>
|
||||
|
||||
#include <boost/make_shared.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
using namespace std;
|
||||
|
||||
/* ************************************************************************* */
|
||||
void Domain::print(const string& s,
|
||||
const KeyFormatter& formatter) const {
  // cout << s << ": Domain on " << formatter(keys_[0]) << " (j=" <<
  // formatter(keys_[0]) << ") with values";
  // for (size_t v: values_) cout << " " << v;
  // cout << endl;
  for (size_t v : values_) cout << v;
}

/* ************************************************************************* */
double Domain::operator()(const Values& values) const {
  return contains(values.at(keys_[0]));
}

/* ************************************************************************* */
DecisionTreeFactor Domain::toDecisionTreeFactor() const {
  DiscreteKeys keys;
  keys += DiscreteKey(keys_[0], cardinality_);
  vector<double> table;
  for (size_t i1 = 0; i1 < cardinality_; ++i1) table.push_back(contains(i1));
  DecisionTreeFactor converted(keys, table);
  return converted;
}

/* ************************************************************************* */
DecisionTreeFactor Domain::operator*(const DecisionTreeFactor& f) const {
  // TODO: can we do this more efficiently?
  return toDecisionTreeFactor() * f;
}

/* ************************************************************************* */
bool Domain::ensureArcConsistency(size_t j, vector<Domain>& domains) const {
  if (j != keys_[0]) throw invalid_argument("Domain check on wrong domain");
  Domain& D = domains[j];
  for (size_t value : values_)
    if (!D.contains(value)) throw runtime_error("Unsatisfiable");
  D = *this;
  return true;
}

/* ************************************************************************* */
bool Domain::checkAllDiff(const KeyVector keys, vector<Domain>& domains) {
  Key j = keys_[0];
  // for all values in this domain
  for (size_t value : values_) {
    // for all connected domains
    for (Key k : keys)
      // if any domain contains the value we cannot make this domain singleton
      if (k != j && domains[k].contains(value)) goto found;
    values_.clear();
    values_.insert(value);
    return true;  // we changed it
  found:;
  }
  return false;  // we did not change it
}

/* ************************************************************************* */
Constraint::shared_ptr Domain::partiallyApply(const Values& values) const {
  Values::const_iterator it = values.find(keys_[0]);
  if (it != values.end() && !contains(it->second))
    throw runtime_error("Domain::partiallyApply: unsatisfiable");
  return boost::make_shared<Domain>(*this);
}

/* ************************************************************************* */
Constraint::shared_ptr Domain::partiallyApply(
    const vector<Domain>& domains) const {
  const Domain& Dk = domains[keys_[0]];
  if (Dk.isSingleton() && !contains(*Dk.begin()))
    throw runtime_error("Domain::partiallyApply: unsatisfiable");
  return boost::make_shared<Domain>(Dk);
}

/* ************************************************************************* */
}  // namespace gtsam
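For orientation, a minimal editor's sketch of how the Domain API above behaves; the key and cardinality are made up for illustration and this is not code from the change:

#include <gtsam_unstable/discrete/Domain.h>

using namespace gtsam;

// Hypothetical variable: key 0 with cardinality 3, i.e. values {0,1,2}.
void domainSketch() {
  DiscreteKey dkey(0, 3);
  Domain D(dkey);  // all-allowed domain {0,1,2}
  D.erase(1);      // restrict to {0,2}: contains(1) is now false
  D.erase(0);      // singleton {2}: isSingleton() true, firstValue() == 2
  // toDecisionTreeFactor() would tabulate the indicator over 0..2 as: 0 0 1
}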
@ -7,111 +7,97 @@
#pragma once

#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/Constraint.h>

namespace gtsam {

/**
 * Domain restriction constraint
 */
class GTSAM_UNSTABLE_EXPORT Domain : public Constraint {
  size_t cardinality_;       /// Cardinality
  std::set<size_t> values_;  /// allowed values

 public:
  typedef boost::shared_ptr<Domain> shared_ptr;

  // Constructor on Discrete Key initializes an "all-allowed" domain
  Domain(const DiscreteKey& dkey)
      : Constraint(dkey.first), cardinality_(dkey.second) {
    for (size_t v = 0; v < cardinality_; v++) values_.insert(v);
  }

  // Constructor on Discrete Key with single allowed value
  // Consider SingleValue constraint
  Domain(const DiscreteKey& dkey, size_t v)
      : Constraint(dkey.first), cardinality_(dkey.second) {
    values_.insert(v);
  }

  /// Copy constructor
  Domain(const Domain& other)
      : Constraint(other.keys_[0]),
        cardinality_(other.cardinality_),
        values_(other.values_) {}

  /// insert a value, non const :-(
  void insert(size_t value) { values_.insert(value); }

  /// erase a value, non const :-(
  void erase(size_t value) { values_.erase(value); }

  size_t nrValues() const { return values_.size(); }

  bool isSingleton() const { return nrValues() == 1; }

  size_t firstValue() const { return *values_.begin(); }

  // print
  void print(const std::string& s = "", const KeyFormatter& formatter =
                                            DefaultKeyFormatter) const override;

  /// equals
  bool equals(const DiscreteFactor& other, double tol) const override {
    if (!dynamic_cast<const Domain*>(&other))
      return false;
    else {
      const Domain& f(static_cast<const Domain&>(other));
      return (cardinality_ == f.cardinality_) && (values_ == f.values_);
    }
  }

  bool contains(size_t value) const { return values_.count(value) > 0; }

  /// Calculate value
  double operator()(const Values& values) const override;

  /// Convert into a decisiontree
  DecisionTreeFactor toDecisionTreeFactor() const override;

  /// Multiply into a decisiontree
  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override;

  /*
   * Ensure Arc-consistency
   * @param j domain to be checked
   * @param domains all other domains
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override;

  /**
   * Check for a value in domain that does not occur in any other connected
   * domain. If found, we make this a singleton... Called in
   * AllDiff::ensureArcConsistency
   * @param keys connected domains through alldiff
   */
  bool checkAllDiff(const KeyVector keys, std::vector<Domain>& domains);

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values& values) const override;

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>& domains) const override;
};

}  // namespace gtsam
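To make the checkAllDiff contract above concrete, a sketch under an assumed three-variable alldiff; the keys and cardinalities are hypothetical:

#include <gtsam_unstable/discrete/Domain.h>

#include <vector>

using namespace gtsam;

void checkAllDiffSketch() {
  // Three variables with colors {0,1,2}; keys 0, 1, 2 connected by an alldiff.
  std::vector<Domain> domains;
  domains.push_back(Domain(DiscreteKey(0, 3)));  // {0,1,2}
  domains.push_back(Domain(DiscreteKey(1, 3)));  // {0,1,2}
  domains.push_back(Domain(DiscreteKey(2, 3)));  // {0,1,2}
  domains[1].erase(0);  // now {1,2}
  domains[2].erase(0);  // now {1,2}

  // Only key 0 can still take value 0, so checkAllDiff collapses its
  // domain to the singleton {0} and reports a change:
  KeyVector keys;
  keys.push_back(0), keys.push_back(1), keys.push_back(2);
  bool changed = domains[0].checkAllDiff(keys, domains);  // true
  (void)changed;
}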
@ -5,298 +5,286 @@
 * @author Frank Dellaert
 */

#include <gtsam/base/debug.h>
#include <gtsam/base/timing.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam_unstable/discrete/Scheduler.h>

#include <boost/tokenizer.hpp>

#include <cmath>
#include <fstream>
#include <iomanip>

namespace gtsam {

using namespace std;

Scheduler::Scheduler(size_t maxNrStudents, const string& filename)
    : maxNrStudents_(maxNrStudents) {
  typedef boost::tokenizer<boost::escaped_list_separator<char> > Tokenizer;

  // open file
  ifstream is(filename.c_str());
  if (!is) {
    cerr << "Scheduler: could not open file " << filename << endl;
    throw runtime_error("Scheduler: could not open file " + filename);
  }

  string line;  // buffer

  // process first line with faculty
  if (getline(is, line, '\r')) {
    Tokenizer tok(line);
    Tokenizer::iterator it = tok.begin();
    for (++it; it != tok.end(); ++it) addFaculty(*it);
  }

  // for all remaining lines
  size_t count = 0;
  while (getline(is, line, '\r')) {
    if (count++ > 100) throw runtime_error("reached 100 lines, exiting");
    Tokenizer tok(line);
    Tokenizer::iterator it = tok.begin();
    addSlot(*it++);  // add slot
    // add availability
    for (; it != tok.end(); ++it) available_ += (it->empty()) ? "0 " : "1 ";
    available_ += '\n';
  }
}  // constructor

/** addStudent has to be called after adding slots and faculty */
void Scheduler::addStudent(const string& studentName, const string& area1,
                           const string& area2, const string& area3,
                           const string& advisor) {
  assert(nrStudents() < maxNrStudents_);
  assert(facultyInArea_.count(area1));
  assert(facultyInArea_.count(area2));
  assert(facultyInArea_.count(area3));
  size_t advisorIndex = facultyIndex_[advisor];
  Student student(nrFaculty(), advisorIndex);
  student.name_ = studentName;
  // We fix the ordering by assigning a higher index to the student
  // and numbering the areas lower
  Key j = 3 * maxNrStudents_ + nrStudents();
  student.key_ = DiscreteKey(j, nrTimeSlots());
  Key base = 3 * nrStudents();
  student.keys_[0] = DiscreteKey(base + 0, nrFaculty());
  student.keys_[1] = DiscreteKey(base + 1, nrFaculty());
  student.keys_[2] = DiscreteKey(base + 2, nrFaculty());
  student.areaName_[0] = area1;
  student.areaName_[1] = area2;
  student.areaName_[2] = area3;
  students_.push_back(student);
}

/** get key for student and area, 0 is time slot itself */
const DiscreteKey& Scheduler::key(size_t s,
                                  boost::optional<size_t> area) const {
  return area ? students_[s].keys_[*area] : students_[s].key_;
}

const string& Scheduler::studentName(size_t i) const {
  assert(i < nrStudents());
  return students_[i].name_;
}

const DiscreteKey& Scheduler::studentKey(size_t i) const {
  assert(i < nrStudents());
  return students_[i].key_;
}

const string& Scheduler::studentArea(size_t i, size_t area) const {
  assert(i < nrStudents());
  return students_[i].areaName_[area];
}

/** Add student-specific constraints to the graph */
void Scheduler::addStudentSpecificConstraints(size_t i,
                                              boost::optional<size_t> slot) {
  bool debug = ISDEBUG("Scheduler::buildGraph");

  assert(i < nrStudents());
  const Student& s = students_[i];

  if (!slot && !slotsAvailable_.empty()) {
    if (debug) cout << "Adding availability of slots" << endl;
    assert(slotsAvailable_.size() == s.key_.second);
    CSP::add(s.key_, slotsAvailable_);
  }

  // For all areas
  for (size_t area = 0; area < 3; area++) {
    DiscreteKey areaKey = s.keys_[area];
    const string& areaName = s.areaName_[area];

    if (debug) cout << "Area constraints " << areaName << endl;
    assert(facultyInArea_[areaName].size() == areaKey.second);
    CSP::add(areaKey, facultyInArea_[areaName]);

    if (debug) cout << "Advisor constraint " << areaName << endl;
    assert(s.advisor_.size() == areaKey.second);
    CSP::add(areaKey, s.advisor_);

    if (debug) cout << "Availability of faculty " << areaName << endl;
    if (slot) {
      // get all constraints then specialize to slot
      size_t dummyIndex = maxNrStudents_ * 3 + maxNrStudents_;
      DiscreteKey dummy(dummyIndex, nrTimeSlots());
      Potentials::ADT p(dummy & areaKey,
                        available_);  // available_ is Doodle string
      Potentials::ADT q = p.choose(dummyIndex, *slot);
      DiscreteFactor::shared_ptr f(new DecisionTreeFactor(areaKey, q));
      CSP::push_back(f);
    } else {
      CSP::add(s.key_, areaKey, available_);  // available_ is Doodle string
    }
  }

  // add mutex
  if (debug) cout << "Mutex for faculty" << endl;
  addAllDiff(s.keys_[0] & s.keys_[1] & s.keys_[2]);
}

/** Main routine that builds factor graph */
void Scheduler::buildGraph(size_t mutexBound) {
  bool debug = ISDEBUG("Scheduler::buildGraph");

  if (debug) cout << "Adding student-specific constraints" << endl;
  for (size_t i = 0; i < nrStudents(); i++) addStudentSpecificConstraints(i);

  // special constraint for MN
  if (studentName(0) == "Michael N")
    CSP::add(studentKey(0), "0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1");

  if (!mutexBound) {
    DiscreteKeys dkeys;
    for (const Student& s : students_) dkeys.push_back(s.key_);
    addAllDiff(dkeys);
  } else {
    if (debug) cout << "Mutex for Students" << endl;
    for (size_t i1 = 0; i1 < nrStudents(); i1++) {
      // if mutexBound=1, we only mutex with next student
      size_t bound = min((i1 + 1 + mutexBound), nrStudents());
      for (size_t i2 = i1 + 1; i2 < bound; i2++) {
        addAllDiff(studentKey(i1), studentKey(i2));
      }
    }
  }
}  // buildGraph

/** print */
void Scheduler::print(const string& s, const KeyFormatter& formatter) const {
  cout << s << " Faculty:" << endl;
  for (const string& name : facultyName_) cout << name << '\n';
  cout << endl;

  cout << s << " Slots:\n";
  size_t i = 0;
  for (const string& name : slotName_) cout << i++ << " " << name << endl;
  cout << endl;

  cout << "Availability:\n" << available_ << '\n';

  cout << s << " Area constraints:\n";
  for (const FacultyInArea::value_type& it : facultyInArea_) {
    cout << setw(12) << it.first << ": ";
    for (double v : it.second) cout << v << " ";
    cout << '\n';
  }
  cout << endl;

  cout << s << " Students:\n";
  for (const Student& student : students_) student.print();
  cout << endl;

  CSP::print(s + " Factor graph");
  cout << endl;
}  // print

/** Print readable form of assignment */
void Scheduler::printAssignment(sharedValues assignment) const {
  // Not intended to be general! Assumes very particular ordering !
  cout << endl;
  for (size_t s = 0; s < nrStudents(); s++) {
    Key j = 3 * maxNrStudents_ + s;
    size_t slot = assignment->at(j);
    cout << studentName(s) << " slot: " << slotName_[slot] << endl;
    Key base = 3 * s;
    for (size_t area = 0; area < 3; area++) {
      size_t faculty = assignment->at(base + area);
      cout << setw(12) << studentArea(s, area) << ": " << facultyName_[faculty]
           << endl;
    }
    cout << endl;
  }
}

/** Special print for single-student case */
void Scheduler::printSpecial(sharedValues assignment) const {
  Values::const_iterator it = assignment->begin();
  for (size_t area = 0; area < 3; area++, it++) {
    size_t f = it->second;
    cout << setw(12) << studentArea(0, area) << ": " << facultyName_[f] << endl;
  }
  cout << endl;
}

/** Accumulate faculty stats */
void Scheduler::accumulateStats(sharedValues assignment,
                                vector<size_t>& stats) const {
  for (size_t s = 0; s < nrStudents(); s++) {
    Key base = 3 * s;
    for (size_t area = 0; area < 3; area++) {
      size_t f = assignment->at(base + area);
      assert(f < stats.size());
      stats[f]++;
    }  // area
  }    // s
}

/** Eliminate, return a Bayes net */
DiscreteBayesNet::shared_ptr Scheduler::eliminate() const {
  gttic(my_eliminate);
  // TODO: fix this!!
  size_t maxKey = keys().size();
  Ordering defaultKeyOrdering;
  for (size_t i = 0; i < maxKey; ++i) defaultKeyOrdering += Key(i);
  DiscreteBayesNet::shared_ptr chordal =
      this->eliminateSequential(defaultKeyOrdering);
  gttoc(my_eliminate);
  return chordal;
}

/** Find the best total assignment - can be expensive */
Scheduler::sharedValues Scheduler::optimalAssignment() const {
  DiscreteBayesNet::shared_ptr chordal = eliminate();

  if (ISDEBUG("Scheduler::optimalAssignment")) {
    DiscreteBayesNet::const_iterator it = chordal->end() - 1;
    const Student& student = students_.front();
    cout << endl;
    (*it)->print(student.name_);
  }

  gttic(my_optimize);
  sharedValues mpe = chordal->optimize();
  gttoc(my_optimize);
  return mpe;
}

/** find the assignment of students to slots with most possible committees */
Scheduler::sharedValues Scheduler::bestSchedule() const {
  sharedValues best;
  throw runtime_error("bestSchedule not implemented");
  return best;
}

/** find the corresponding most desirable committee assignment */
Scheduler::sharedValues Scheduler::bestAssignment(
    sharedValues bestSchedule) const {
  sharedValues best;
  throw runtime_error("bestAssignment not implemented");
  return best;
}

}  // namespace gtsam
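A hedged end-to-end sketch of the Scheduler API implemented above; the faculty, slot, and area names are invented for illustration, and the availability string simply follows the "0 "/"1 " per-faculty, newline-per-slot layout that the file constructor builds:

#include <gtsam_unstable/discrete/Scheduler.h>

using namespace gtsam;

void schedulerSketch() {
  Scheduler scheduler(1);  // maxNrStudents = 1
  scheduler.addFaculty("Alice");
  scheduler.addFaculty("Bob");
  scheduler.addFaculty("Charlie");
  scheduler.addFaculty("Dana");
  scheduler.addSlot("Mon 9:00");
  scheduler.addSlot("Mon 10:00");
  scheduler.addArea("Alice", "AI");
  scheduler.addArea("Bob", "AI");
  scheduler.addArea("Bob", "Systems");
  scheduler.addArea("Charlie", "Systems");
  scheduler.addArea("Charlie", "Theory");
  scheduler.addArea("Dana", "Theory");
  // nrTimeSlots() x nrFaculty() availability, one row per slot:
  scheduler.setAvailability("1 1 1 1 \n1 1 1 1 \n");
  // Advisor Alice is excluded from the committee, so Bob/Charlie/Dana remain.
  scheduler.addStudent("Eve", "AI", "Systems", "Theory", "Alice");
  scheduler.buildGraph();  // mutexBound defaults to 7
  Scheduler::sharedValues assignment = scheduler.optimalAssignment();
  scheduler.printAssignment(assignment);
}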
@ -11,165 +11,150 @@
namespace gtsam {

/**
 * Scheduler class
 * Creates one variable for each student, and three variables for each
 * of the student's areas, for a total of 4*nrStudents variables.
 * The "student" variable will determine when the student takes the qual.
 * The "area" variables determine which faculty are on his/her committee.
 */
class GTSAM_UNSTABLE_EXPORT Scheduler : public CSP {
 private:
  /** Internal data structure for students */
  struct Student {
    std::string name_;
    DiscreteKey key_;                // key for student
    std::vector<DiscreteKey> keys_;  // key for areas
    std::vector<std::string> areaName_;
    std::vector<double> advisor_;
    Student(size_t nrFaculty, size_t advisorIndex)
        : keys_(3), areaName_(3), advisor_(nrFaculty, 1.0) {
      advisor_[advisorIndex] = 0.0;
    }
    void print() const {
      using std::cout;
      cout << name_ << ": ";
      for (size_t area = 0; area < 3; area++) cout << areaName_[area] << " ";
      cout << std::endl;
    }
  };

  /** Maximum number of students */
  size_t maxNrStudents_;

  /** discrete keys, indexed by student and area index */
  std::vector<Student> students_;

  /** faculty identifiers */
  std::map<std::string, size_t> facultyIndex_;
  std::vector<std::string> facultyName_, slotName_, areaName_;

  /** area constraints */
  typedef std::map<std::string, std::vector<double> > FacultyInArea;
  FacultyInArea facultyInArea_;

  /** nrTimeSlots * nrFaculty availability constraints */
  std::string available_;

  /** which slots are good */
  std::vector<double> slotsAvailable_;

 public:
  /**
   * Constructor
   * We need to know the number of students in advance for ordering keys.
   * then add faculty, slots, areas, availability, students, in that order
   */
  Scheduler(size_t maxNrStudents) : maxNrStudents_(maxNrStudents) {}

  /// Destructor
  virtual ~Scheduler() {}

  void addFaculty(const std::string& facultyName) {
    facultyIndex_[facultyName] = nrFaculty();
    facultyName_.push_back(facultyName);
  }

  size_t nrFaculty() const { return facultyName_.size(); }

  /** boolean std::string of nrTimeSlots * nrFaculty */
  void setAvailability(const std::string& available) { available_ = available; }

  void addSlot(const std::string& slotName) { slotName_.push_back(slotName); }

  size_t nrTimeSlots() const { return slotName_.size(); }

  const std::string& slotName(size_t s) const { return slotName_[s]; }

  /** slots available, boolean */
  void setSlotsAvailable(const std::vector<double>& slotsAvailable) {
    slotsAvailable_ = slotsAvailable;
  }

  void addArea(const std::string& facultyName, const std::string& areaName) {
    areaName_.push_back(areaName);
    std::vector<double>& table =
        facultyInArea_[areaName];  // will create if needed
    if (table.empty()) table.resize(nrFaculty(), 0);
    table[facultyIndex_[facultyName]] = 1;
  }

  /**
   * Constructor that reads in faculty, slots, availability.
   * Still need to add areas and students after this
   */
  Scheduler(size_t maxNrStudents, const std::string& filename);

  /** get key for student and area, 0 is time slot itself */
  const DiscreteKey& key(size_t s,
                         boost::optional<size_t> area = boost::none) const;

  /** addStudent has to be called after adding slots and faculty */
  void addStudent(const std::string& studentName, const std::string& area1,
                  const std::string& area2, const std::string& area3,
                  const std::string& advisor);

  /// current number of students
  size_t nrStudents() const { return students_.size(); }

  const std::string& studentName(size_t i) const;
  const DiscreteKey& studentKey(size_t i) const;
  const std::string& studentArea(size_t i, size_t area) const;

  /** Add student-specific constraints to the graph */
  void addStudentSpecificConstraints(
      size_t i, boost::optional<size_t> slot = boost::none);

  /** Main routine that builds factor graph */
  void buildGraph(size_t mutexBound = 7);

  /** print */
  void print(
      const std::string& s = "Scheduler",
      const KeyFormatter& formatter = DefaultKeyFormatter) const override;

  /** Print readable form of assignment */
  void printAssignment(sharedValues assignment) const;

  /** Special print for single-student case */
  void printSpecial(sharedValues assignment) const;

  /** Accumulate faculty stats */
  void accumulateStats(sharedValues assignment,
                       std::vector<size_t>& stats) const;

  /** Eliminate, return a Bayes net */
  DiscreteBayesNet::shared_ptr eliminate() const;

  /** Find the best total assignment - can be expensive */
  sharedValues optimalAssignment() const;

  /** find the assignment of students to slots with most possible committees */
  sharedValues bestSchedule() const;

  /** find the corresponding most desirable committee assignment */
  sharedValues bestAssignment(sharedValues bestSchedule) const;
};  // Scheduler

}  // namespace gtsam
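The key numbering implied by the class comment and addStudent, worked for a small case (editor's illustration only):

// With maxNrStudents_ = 2 and student s = 0, addStudent lays out:
//   area keys : 3*s + {0,1,2}         -> keys 0, 1, 2  (cardinality nrFaculty())
//   slot key  : 3*maxNrStudents_ + s  -> key 6         (cardinality nrTimeSlots())
// Area variables therefore get lower indices than every slot variable,
// which fixes the elimination order used by eliminate().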
@ -5,75 +5,74 @@
 * @author Frank Dellaert
 */

#include <gtsam/base/Testable.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/SingleValue.h>

#include <boost/make_shared.hpp>

namespace gtsam {

using namespace std;

/* ************************************************************************* */
void SingleValue::print(const string& s, const KeyFormatter& formatter) const {
  cout << s << "SingleValue on "
       << "j=" << formatter(keys_[0]) << " with value " << value_ << endl;
}

/* ************************************************************************* */
double SingleValue::operator()(const Values& values) const {
  return (double)(values.at(keys_[0]) == value_);
}

/* ************************************************************************* */
DecisionTreeFactor SingleValue::toDecisionTreeFactor() const {
  DiscreteKeys keys;
  keys += DiscreteKey(keys_[0], cardinality_);
  vector<double> table;
  for (size_t i1 = 0; i1 < cardinality_; i1++) table.push_back(i1 == value_);
  DecisionTreeFactor converted(keys, table);
  return converted;
}

/* ************************************************************************* */
DecisionTreeFactor SingleValue::operator*(const DecisionTreeFactor& f) const {
  // TODO: can we do this more efficiently?
  return toDecisionTreeFactor() * f;
}

/* ************************************************************************* */
bool SingleValue::ensureArcConsistency(size_t j,
                                       vector<Domain>& domains) const {
  if (j != keys_[0])
    throw invalid_argument("SingleValue check on wrong domain");
  Domain& D = domains[j];
  if (D.isSingleton()) {
    if (D.firstValue() != value_) throw runtime_error("Unsatisfiable");
    return false;
  }
  D = Domain(discreteKey(), value_);
  return true;
}

/* ************************************************************************* */
Constraint::shared_ptr SingleValue::partiallyApply(const Values& values) const {
  Values::const_iterator it = values.find(keys_[0]);
  if (it != values.end() && it->second != value_)
    throw runtime_error("SingleValue::partiallyApply: unsatisfiable");
  return boost::make_shared<SingleValue>(keys_[0], cardinality_, value_);
}

/* ************************************************************************* */
Constraint::shared_ptr SingleValue::partiallyApply(
    const vector<Domain>& domains) const {
  const Domain& Dk = domains[keys_[0]];
  if (Dk.isSingleton() && !Dk.contains(value_))
    throw runtime_error("SingleValue::partiallyApply: unsatisfiable");
  return boost::make_shared<SingleValue>(discreteKey(), value_);
}

/* ************************************************************************* */
}  // namespace gtsam
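A small editor's sketch of the arc-consistency behavior above; key 0 and cardinality 3 are hypothetical:

#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/SingleValue.h>

#include <vector>

using namespace gtsam;

void singleValueSketch() {
  std::vector<Domain> domains;
  domains.push_back(Domain(DiscreteKey(0, 3)));  // {0,1,2}
  SingleValue sv(DiscreteKey(0, 3), 2);
  bool changed = sv.ensureArcConsistency(0, domains);  // true
  // domains[0] is now the singleton {2}; a second call returns false
  // because the domain already agrees with the constraint.
  (void)changed;
}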
@ -7,76 +7,73 @@
#pragma once

#include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/Constraint.h>

namespace gtsam {

/**
 * SingleValue constraint
 */
class GTSAM_UNSTABLE_EXPORT SingleValue : public Constraint {
  /// Number of values
  size_t cardinality_;

  /// allowed value
  size_t value_;

  DiscreteKey discreteKey() const {
    return DiscreteKey(keys_[0], cardinality_);
  }

 public:
  typedef boost::shared_ptr<SingleValue> shared_ptr;

  /// Constructor
  SingleValue(Key key, size_t n, size_t value)
      : Constraint(key), cardinality_(n), value_(value) {}

  /// Constructor
  SingleValue(const DiscreteKey& dkey, size_t value)
      : Constraint(dkey.first), cardinality_(dkey.second), value_(value) {}

  // print
  void print(const std::string& s = "", const KeyFormatter& formatter =
                                            DefaultKeyFormatter) const override;

  /// equals
  bool equals(const DiscreteFactor& other, double tol) const override {
    if (!dynamic_cast<const SingleValue*>(&other))
      return false;
    else {
      const SingleValue& f(static_cast<const SingleValue&>(other));
      return (cardinality_ == f.cardinality_) && (value_ == f.value_);
    }
  }

  /// Calculate value
  double operator()(const Values& values) const override;

  /// Convert into a decisiontree
  DecisionTreeFactor toDecisionTreeFactor() const override;

  /// Multiply into a decisiontree
  DecisionTreeFactor operator*(const DecisionTreeFactor& f) const override;

  /*
   * Ensure Arc-consistency
   * @param j domain to be checked
   * @param domains all other domains
   */
  bool ensureArcConsistency(size_t j,
                            std::vector<Domain>& domains) const override;

  /// Partially apply known values
  Constraint::shared_ptr partiallyApply(const Values& values) const override;

  /// Partially apply known values, domain version
  Constraint::shared_ptr partiallyApply(
      const std::vector<Domain>& domains) const override;
};

}  // namespace gtsam
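For reference, the factor semantics of this constraint (illustration, assumed key 0 with cardinality 3):

// SingleValue sv(DiscreteKey(0, 3), 2);
// sv(values) == 1.0 when values[0] == 2, and 0.0 otherwise,
// i.e. the same indicator table "0 0 1" that toDecisionTreeFactor() builds.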
@ -7,49 +7,50 @@
#include <gtsam_unstable/discrete/CSP.h>
#include <gtsam_unstable/discrete/Domain.h>

#include <boost/assign/std/map.hpp>
using boost::assign::insert;
#include <CppUnitLite/TestHarness.h>

#include <fstream>
#include <iostream>

using namespace std;
using namespace gtsam;

/* ************************************************************************* */
TEST_UNSAFE(BinaryAllDif, allInOne) {
  // Create keys and ordering
  size_t nrColors = 2;
  // DiscreteKey ID("Idaho", nrColors), UT("Utah", nrColors), AZ("Arizona",
  // nrColors);
  DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);

  // Check construction and conversion
  BinaryAllDiff c1(ID, UT);
  DecisionTreeFactor f1(ID & UT, "0 1 1 0");
  EXPECT(assert_equal(f1, c1.toDecisionTreeFactor()));

  // Check construction and conversion
  BinaryAllDiff c2(UT, AZ);
  DecisionTreeFactor f2(UT & AZ, "0 1 1 0");
  EXPECT(assert_equal(f2, c2.toDecisionTreeFactor()));

  DecisionTreeFactor f3 = f1 * f2;
  EXPECT(assert_equal(f3, c1 * f2));
  EXPECT(assert_equal(f3, c2 * f1));
}

/* ************************************************************************* */
TEST_UNSAFE(CSP, allInOne) {
  // Create keys and ordering
  size_t nrColors = 2;
  DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);

  // Create the CSP
  CSP csp;
  csp.addAllDiff(ID, UT);
  csp.addAllDiff(UT, AZ);

  // Check an invalid combination, with ID==UT==AZ all same color
  DiscreteFactor::Values invalid;
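A short note on the factor table used in this test:

// Why "0 1 1 0": for two binary keys the table enumerates (ID,UT) in the
// order (0,0), (0,1), (1,0), (1,1); the alldiff constraint is 1 exactly
// when the two values differ, so only the mixed assignments get weight 1.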
@ -69,67 +70,67 @@ TEST_UNSAFE( CSP, allInOne)
  DecisionTreeFactor product = csp.product();
  // product.dot("product");
  DecisionTreeFactor expectedProduct(ID & AZ & UT, "0 1 0 0 0 0 1 0");
  EXPECT(assert_equal(expectedProduct, product));

  // Solve
  CSP::sharedValues mpe = csp.optimalAssignment();
  CSP::Values expected;
  insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1);
  EXPECT(assert_equal(expected, *mpe));
  EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);
}

/* ************************************************************************* */
TEST_UNSAFE(CSP, WesternUS) {
  // Create keys
  size_t nrColors = 4;
  DiscreteKey
      // Create ordering according to example in ND-CSP.lyx
      WA(0, nrColors),
      OR(3, nrColors), CA(1, nrColors), NV(2, nrColors), ID(8, nrColors),
      UT(9, nrColors), AZ(10, nrColors), MT(4, nrColors), WY(5, nrColors),
      CO(7, nrColors), NM(6, nrColors);

  // Create the CSP
  CSP csp;
  csp.addAllDiff(WA, ID);
  csp.addAllDiff(WA, OR);
  csp.addAllDiff(OR, ID);
  csp.addAllDiff(OR, CA);
  csp.addAllDiff(OR, NV);
  csp.addAllDiff(CA, NV);
  csp.addAllDiff(CA, AZ);
  csp.addAllDiff(ID, MT);
  csp.addAllDiff(ID, WY);
  csp.addAllDiff(ID, UT);
  csp.addAllDiff(ID, NV);
  csp.addAllDiff(NV, UT);
  csp.addAllDiff(NV, AZ);
  csp.addAllDiff(UT, WY);
  csp.addAllDiff(UT, CO);
  csp.addAllDiff(UT, NM);
  csp.addAllDiff(UT, AZ);
  csp.addAllDiff(AZ, CO);
  csp.addAllDiff(AZ, NM);
  csp.addAllDiff(MT, WY);
  csp.addAllDiff(WY, CO);
  csp.addAllDiff(CO, NM);

  // Solve
  Ordering ordering;
  ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7),
      Key(8), Key(9), Key(10);
  CSP::sharedValues mpe = csp.optimalAssignment(ordering);
  // GTSAM_PRINT(*mpe);
  CSP::Values expected;
  insert(expected)(WA.first, 1)(CA.first, 1)(NV.first, 3)(OR.first, 0)(
      MT.first, 1)(WY.first, 0)(NM.first, 3)(CO.first, 2)(ID.first, 2)(
      UT.first, 1)(AZ.first, 0);

  // TODO: Fix me! mpe result seems to be right. (See the printing)
  // It has the same prob as the expected solution.
  // Is mpe another solution, or the expected solution is unique???
  EXPECT(assert_equal(expected, *mpe));
  EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);

  // Write out the dual graph for hmetis
@ -142,8 +143,7 @@ TEST_UNSAFE( CSP, WesternUS)
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( CSP, AllDiff)
|
||||
{
|
||||
TEST_UNSAFE(CSP, AllDiff) {
|
||||
// Create keys and ordering
|
||||
size_t nrColors = 3;
|
||||
DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);
|
||||
|
@ -151,24 +151,25 @@ TEST_UNSAFE( CSP, AllDiff)
|
|||
// Create the CSP
|
||||
CSP csp;
|
||||
vector<DiscreteKey> dkeys;
|
||||
dkeys += ID,UT,AZ;
|
||||
dkeys += ID, UT, AZ;
|
||||
csp.addAllDiff(dkeys);
|
||||
csp.addSingleValue(AZ,2);
|
||||
// GTSAM_PRINT(csp);
|
||||
csp.addSingleValue(AZ, 2);
|
||||
// GTSAM_PRINT(csp);
|
||||
|
||||
// Check construction and conversion
|
||||
SingleValue s(AZ,2);
|
||||
DecisionTreeFactor f1(AZ,"0 0 1");
|
||||
EXPECT(assert_equal(f1,s.toDecisionTreeFactor()));
|
||||
SingleValue s(AZ, 2);
|
||||
DecisionTreeFactor f1(AZ, "0 0 1");
|
||||
EXPECT(assert_equal(f1, s.toDecisionTreeFactor()));
|
||||
|
||||
// Check construction and conversion
|
||||
AllDiff alldiff(dkeys);
|
||||
DecisionTreeFactor actual = alldiff.toDecisionTreeFactor();
|
||||
// GTSAM_PRINT(actual);
|
||||
// actual.dot("actual");
|
||||
DecisionTreeFactor f2(ID & AZ & UT,
|
||||
// GTSAM_PRINT(actual);
|
||||
// actual.dot("actual");
|
||||
DecisionTreeFactor f2(
|
||||
ID & AZ & UT,
|
||||
"0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0");
|
||||
EXPECT(assert_equal(f2,actual));
|
||||
EXPECT(assert_equal(f2, actual));
|
||||
|
||||
// Check an invalid combination, with ID==UT==AZ all same color
|
||||
DiscreteFactor::Values invalid;
|
||||
|
@ -188,36 +189,36 @@ TEST_UNSAFE( CSP, AllDiff)
|
|||
CSP::sharedValues mpe = csp.optimalAssignment();
|
||||
CSP::Values expected;
|
||||
insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2);
|
||||
EXPECT(assert_equal(expected,*mpe));
|
||||
EXPECT(assert_equal(expected, *mpe));
|
||||
EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);
|
||||
|
||||
// Arc-consistency
|
||||
vector<Domain> domains;
|
||||
domains += Domain(ID), Domain(AZ), Domain(UT);
|
||||
SingleValue singleValue(AZ,2);
|
||||
EXPECT(singleValue.ensureArcConsistency(1,domains));
|
||||
EXPECT(alldiff.ensureArcConsistency(0,domains));
|
||||
EXPECT(!alldiff.ensureArcConsistency(1,domains));
|
||||
EXPECT(alldiff.ensureArcConsistency(2,domains));
|
||||
LONGS_EQUAL(2,domains[0].nrValues());
|
||||
LONGS_EQUAL(1,domains[1].nrValues());
|
||||
LONGS_EQUAL(2,domains[2].nrValues());
|
||||
SingleValue singleValue(AZ, 2);
|
||||
EXPECT(singleValue.ensureArcConsistency(1, domains));
|
||||
EXPECT(alldiff.ensureArcConsistency(0, domains));
|
||||
EXPECT(!alldiff.ensureArcConsistency(1, domains));
|
||||
EXPECT(alldiff.ensureArcConsistency(2, domains));
|
||||
LONGS_EQUAL(2, domains[0].nrValues());
|
||||
LONGS_EQUAL(1, domains[1].nrValues());
|
||||
LONGS_EQUAL(2, domains[2].nrValues());
|
||||
|
||||
// Partial application, version 1
DiscreteFactor::Values known;
known[AZ.first] = 2;
DiscreteFactor::shared_ptr reduced1 = alldiff.partiallyApply(known);
DecisionTreeFactor f3(ID & UT, "0 1 1 1 0 1 1 1 0");
EXPECT(assert_equal(f3,reduced1->toDecisionTreeFactor()));
EXPECT(assert_equal(f3, reduced1->toDecisionTreeFactor()));
DiscreteFactor::shared_ptr reduced2 = singleValue.partiallyApply(known);
DecisionTreeFactor f4(AZ, "0 0 1");
EXPECT(assert_equal(f4,reduced2->toDecisionTreeFactor()));
EXPECT(assert_equal(f4, reduced2->toDecisionTreeFactor()));

// Partial application, version 2
DiscreteFactor::shared_ptr reduced3 = alldiff.partiallyApply(domains);
EXPECT(assert_equal(f3,reduced3->toDecisionTreeFactor()));
EXPECT(assert_equal(f3, reduced3->toDecisionTreeFactor()));
DiscreteFactor::shared_ptr reduced4 = singleValue.partiallyApply(domains);
EXPECT(assert_equal(f4,reduced4->toDecisionTreeFactor()));
EXPECT(assert_equal(f4, reduced4->toDecisionTreeFactor()));

// full arc-consistency test
csp.runArcConsistency(nrColors);

@ -229,4 +230,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

@ -5,14 +5,15 @@
* @date Oct 11, 2013
*/

#include <gtsam/inference/VariableIndex.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/range/adaptor/map.hpp>
#include <gtsam/inference/VariableIndex.h>

#include <boost/assign/list_of.hpp>
#include <iostream>
#include <boost/range/adaptor/map.hpp>
#include <fstream>
#include <iostream>

using namespace std;
using namespace boost;

@ -23,11 +24,12 @@ using namespace gtsam;
* Loopy belief solver for graphs with only binary and unary factors
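*
* Hypothetical usage, mirroring the test at the bottom of this file
* (graph is a DiscreteFactorGraph, allKeys a map<Key, DiscreteKey>):
*   LoopyBelief solver(graph, allKeys);
*   DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys);
*   beliefs->print("beliefs");  // call iterate() repeatedly to propagate further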
*/
class LoopyBelief {

/** Star graph struct for each node, containing
* - the star graph itself
* - the product of original unary factors so we don't have to recompute it later, and
* - the factor indices of the corrected belief factors of the neighboring nodes
* - the product of original unary factors so we don't have to recompute it
* later, and
* - the factor indices of the corrected belief factors of the neighboring
* nodes
*/
typedef std::map<Key, size_t> CorrectedBeliefIndices;
struct StarGraph {

@ -36,41 +38,41 @@ class LoopyBelief {
DecisionTreeFactor::shared_ptr unary;
VariableIndex varIndex_;
StarGraph(const DiscreteFactorGraph::shared_ptr& _star,
const CorrectedBeliefIndices& _beliefIndices,
const DecisionTreeFactor::shared_ptr& _unary) :
star(_star), correctedBeliefIndices(_beliefIndices), unary(_unary), varIndex_(
*_star) {
}
const CorrectedBeliefIndices& _beliefIndices,
const DecisionTreeFactor::shared_ptr& _unary)
: star(_star),
correctedBeliefIndices(_beliefIndices),
unary(_unary),
varIndex_(*_star) {}

void print(const std::string& s = "") const {
cout << s << ":" << endl;
star->print("Star graph: ");
for(Key key: correctedBeliefIndices | boost::adaptors::map_keys) {
for (Key key : correctedBeliefIndices | boost::adaptors::map_keys) {
cout << "Belief factor index for " << key << ": "
<< correctedBeliefIndices.at(key) << endl;
<< correctedBeliefIndices.at(key) << endl;
}
if (unary)
unary->print("Unary: ");
if (unary) unary->print("Unary: ");
}
};

typedef std::map<Key, StarGraph> StarGraphs;
StarGraphs starGraphs_; ///< star graph at each variable
StarGraphs starGraphs_;  ///< star graph at each variable

public:
 public:
/** Constructor
* Need all discrete keys to access node's cardinality for creating belief factors
* Need all discrete keys to access node's cardinality for creating belief
* factors
* TODO: so troublesome!!
*/
LoopyBelief(const DiscreteFactorGraph& graph,
const std::map<Key, DiscreteKey>& allDiscreteKeys) :
starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) {
}
const std::map<Key, DiscreteKey>& allDiscreteKeys)
: starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) {}

/// print
void print(const std::string& s = "") const {
cout << s << ":" << endl;
for(Key key: starGraphs_ | boost::adaptors::map_keys) {
for (Key key : starGraphs_ | boost::adaptors::map_keys) {
starGraphs_.at(key).print((boost::format("Node %d:") % key).str());
}
}

@ -79,12 +81,13 @@ public:
DiscreteFactorGraph::shared_ptr iterate(
const std::map<Key, DiscreteKey>& allDiscreteKeys) {
static const bool debug = false;
static DiscreteConditional::shared_ptr dummyCond; // unused by-product of elimination
static DiscreteConditional::shared_ptr
dummyCond;  // unused by-product of elimination
DiscreteFactorGraph::shared_ptr beliefs(new DiscreteFactorGraph());
std::map<Key, std::map<Key, DiscreteFactor::shared_ptr> > allMessages;
// Eliminate each star graph
for(Key key: starGraphs_ | boost::adaptors::map_keys) {
// cout << "***** Node " << key << "*****" << endl;
for (Key key : starGraphs_ | boost::adaptors::map_keys) {
// cout << "***** Node " << key << "*****" << endl;
// initialize belief to the unary factor from the original graph
DecisionTreeFactor::shared_ptr beliefAtKey;

@ -92,15 +95,16 @@ public:
std::map<Key, DiscreteFactor::shared_ptr> messages;

// eliminate each neighbor in this star graph one by one
for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) {
for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices |
boost::adaptors::map_keys) {
DiscreteFactorGraph subGraph;
for(size_t factor: starGraphs_.at(key).varIndex_[neighbor]) {
for (size_t factor : starGraphs_.at(key).varIndex_[neighbor]) {
subGraph.push_back(starGraphs_.at(key).star->at(factor));
}
if (debug) subGraph.print("------- Subgraph:");
DiscreteFactor::shared_ptr message;
boost::tie(dummyCond, message) = EliminateDiscrete(subGraph,
Ordering(list_of(neighbor)));
boost::tie(dummyCond, message) =
EliminateDiscrete(subGraph, Ordering(list_of(neighbor)));
// store the new factor into messages
messages.insert(make_pair(neighbor, message));
if (debug) message->print("------- Message: ");

@ -108,14 +112,12 @@ public:
// Belief is the product of all messages and the unary factor
// Incorporate new the factor to belief
if (!beliefAtKey)
beliefAtKey = boost::dynamic_pointer_cast<DecisionTreeFactor>(
message);
else
beliefAtKey =
boost::make_shared<DecisionTreeFactor>(
(*beliefAtKey)
* (*boost::dynamic_pointer_cast<DecisionTreeFactor>(
message)));
boost::dynamic_pointer_cast<DecisionTreeFactor>(message);
else
beliefAtKey = boost::make_shared<DecisionTreeFactor>(
(*beliefAtKey) *
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(message)));
}
if (starGraphs_.at(key).unary)
beliefAtKey = boost::make_shared<DecisionTreeFactor>(

@ -133,7 +135,8 @@ public:
sumFactorTable = (boost::format("%s %f") % sumFactorTable % sum).str();
DecisionTreeFactor sumFactor(allDiscreteKeys.at(key), sumFactorTable);
if (debug) sumFactor.print("denomFactor: ");
beliefAtKey = boost::make_shared<DecisionTreeFactor>((*beliefAtKey) / sumFactor);
beliefAtKey =
boost::make_shared<DecisionTreeFactor>((*beliefAtKey) / sumFactor);
if (debug) beliefAtKey->print("New belief at key normalized: ");
beliefs->push_back(beliefAtKey);
allMessages[key] = messages;

@ -141,17 +144,20 @@ public:

// Update corrected beliefs
VariableIndex beliefFactors(*beliefs);
for(Key key: starGraphs_ | boost::adaptors::map_keys) {
for (Key key : starGraphs_ | boost::adaptors::map_keys) {
std::map<Key, DiscreteFactor::shared_ptr> messages = allMessages[key];
for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) {
DecisionTreeFactor correctedBelief = (*boost::dynamic_pointer_cast<
DecisionTreeFactor>(beliefs->at(beliefFactors[key].front())))
/ (*boost::dynamic_pointer_cast<DecisionTreeFactor>(
for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices |
boost::adaptors::map_keys) {
DecisionTreeFactor correctedBelief =
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(
beliefs->at(beliefFactors[key].front()))) /
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(
messages.at(neighbor)));
if (debug) correctedBelief.print("correctedBelief: ");
size_t beliefIndex = starGraphs_.at(neighbor).correctedBeliefIndices.at(
key);
starGraphs_.at(neighbor).star->replace(beliefIndex,
size_t beliefIndex =
starGraphs_.at(neighbor).correctedBeliefIndices.at(key);
starGraphs_.at(neighbor).star->replace(
beliefIndex,
boost::make_shared<DecisionTreeFactor>(correctedBelief));
}
}

@ -161,21 +167,22 @@ public:
return beliefs;
}

private:
 private:
/**
* Build star graphs for each node.
*/
StarGraphs buildStarGraphs(const DiscreteFactorGraph& graph,
StarGraphs buildStarGraphs(
const DiscreteFactorGraph& graph,
const std::map<Key, DiscreteKey>& allDiscreteKeys) const {
StarGraphs starGraphs;
VariableIndex varIndex(graph); ///< access to all factors of each node
for(Key key: varIndex | boost::adaptors::map_keys) {
VariableIndex varIndex(graph);  ///< access to all factors of each node
for (Key key : varIndex | boost::adaptors::map_keys) {
// initialize to multiply with other unary factors later
DecisionTreeFactor::shared_ptr prodOfUnaries;

// collect all factors involving this key in the original graph
DiscreteFactorGraph::shared_ptr star(new DiscreteFactorGraph());
for(size_t factorIndex: varIndex[key]) {
for (size_t factorIndex : varIndex[key]) {
star->push_back(graph.at(factorIndex));

// accumulate unary factors

@ -185,9 +192,9 @@ private:
graph.at(factorIndex));
else
prodOfUnaries = boost::make_shared<DecisionTreeFactor>(
*prodOfUnaries
* (*boost::dynamic_pointer_cast<DecisionTreeFactor>(
graph.at(factorIndex))));
*prodOfUnaries *
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(
graph.at(factorIndex))));
}
}

@ -196,7 +203,7 @@ private:
KeySet neighbors = star->keys();
neighbors.erase(key);
CorrectedBeliefIndices correctedBeliefIndices;
for(Key neighbor: neighbors) {
for (Key neighbor : neighbors) {
// TODO: default table for keys with more than 2 values?
string initialBelief;
for (size_t v = 0; v < allDiscreteKeys.at(neighbor).second - 1; ++v) {

@ -207,9 +214,8 @@ private:
DecisionTreeFactor(allDiscreteKeys.at(neighbor), initialBelief));
correctedBeliefIndices.insert(make_pair(neighbor, star->size() - 1));
}
starGraphs.insert(
make_pair(key,
StarGraph(star, correctedBeliefIndices, prodOfUnaries)));
starGraphs.insert(make_pair(
key, StarGraph(star, correctedBeliefIndices, prodOfUnaries)));
}
return starGraphs;
}

@ -249,7 +255,6 @@ TEST_UNSAFE(LoopyBelief, construction) {
DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys);
beliefs->print();
}

}

/* ************************************************************************* */

@ -5,14 +5,13 @@
*/

//#define ENABLE_TIMING
#include <gtsam_unstable/discrete/Scheduler.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h>
#include <gtsam/base/timing.h>
#include <gtsam_unstable/discrete/Scheduler.h>

#include <CppUnitLite/TestHarness.h>

#include <boost/assign/std/vector.hpp>
#include <boost/assign/std/map.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/optional.hpp>

using namespace boost::assign;

@ -22,7 +21,6 @@ using namespace gtsam;
/* ************************************************************************* */
// Create the expected graph of constraints
DiscreteFactorGraph createExpected() {

// Start building
size_t nrFaculty = 4, nrTimeSlots = 3;

@ -47,27 +45,27 @@ DiscreteFactorGraph createExpected() {
string available = "1 1 1 0 1 1 1 1 0 1 1 1";

// Akansel
expected.add(A1, faculty_in_A); // Area 1
expected.add(A1, "1 1 1 0"); // Advisor
expected.add(A1, faculty_in_A);  // Area 1
expected.add(A1, "1 1 1 0");     // Advisor
expected.add(A & A1, available);
expected.add(A2, faculty_in_M); // Area 2
expected.add(A2, "1 1 1 0"); // Advisor
expected.add(A2, faculty_in_M);  // Area 2
expected.add(A2, "1 1 1 0");     // Advisor
expected.add(A & A2, available);
expected.add(A3, faculty_in_P); // Area 3
expected.add(A3, "1 1 1 0"); // Advisor
expected.add(A3, faculty_in_P);  // Area 3
expected.add(A3, "1 1 1 0");     // Advisor
expected.add(A & A3, available);
// Mutual exclusion for faculty
expected.addAllDiff(A1 & A2 & A3);

// Jake
expected.add(J1, faculty_in_H); // Area 1
expected.add(J1, "1 0 1 1"); // Advisor
expected.add(J1, faculty_in_H);  // Area 1
expected.add(J1, "1 0 1 1");     // Advisor
expected.add(J & J1, available);
expected.add(J2, faculty_in_C); // Area 2
expected.add(J2, "1 0 1 1"); // Advisor
expected.add(J2, faculty_in_C);  // Area 2
expected.add(J2, "1 0 1 1");     // Advisor
expected.add(J & J2, available);
expected.add(J3, faculty_in_A); // Area 3
expected.add(J3, "1 0 1 1"); // Advisor
expected.add(J3, faculty_in_A);  // Area 3
expected.add(J3, "1 0 1 1");     // Advisor
expected.add(J & J3, available);
// Mutual exclusion for faculty
expected.addAllDiff(J1 & J2 & J3);

@ -79,8 +77,7 @@ DiscreteFactorGraph createExpected() {
}

/* ************************************************************************* */
TEST( schedulingExample, test)
{
TEST(schedulingExample, test) {
Scheduler s(2);

// add faculty

@ -121,7 +118,7 @@ TEST( schedulingExample, test)

// Do brute force product and output that to file
DecisionTreeFactor product = s.product();
//product.dot("scheduling", false);
// product.dot("scheduling", false);

// Do exact inference
gttic(small);

@ -129,25 +126,24 @@ TEST( schedulingExample, test)
gttoc(small);

// print MPE, commented out as unit tests don't print
// s.printAssignment(MPE);
// s.printAssignment(MPE);

// Commented out as does not work yet
// s.runArcConsistency(8,10,true);

// find the assignment of students to slots with most possible committees
// Commented out as not implemented yet
// sharedValues bestSchedule = s.bestSchedule();
// GTSAM_PRINT(*bestSchedule);
// sharedValues bestSchedule = s.bestSchedule();
// GTSAM_PRINT(*bestSchedule);

// find the corresponding most desirable committee assignment
// Commented out as not implemented yet
// sharedValues bestAssignment = s.bestAssignment(bestSchedule);
// GTSAM_PRINT(*bestAssignment);
// sharedValues bestAssignment = s.bestAssignment(bestSchedule);
// GTSAM_PRINT(*bestAssignment);
}

/* ************************************************************************* */
TEST( schedulingExample, smallFromFile)
{
TEST(schedulingExample, smallFromFile) {
string path(TOPSRCDIR "/gtsam_unstable/discrete/examples/");
Scheduler s(2, path + "small.csv");

@ -179,4 +175,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

@ -5,21 +5,22 @@
* @author Frank Dellaert
*/

#include <gtsam_unstable/discrete/CSP.h>
#include <CppUnitLite/TestHarness.h>
#include <gtsam_unstable/discrete/CSP.h>

#include <boost/assign/std/map.hpp>
using boost::assign::insert;
#include <stdarg.h>

#include <iostream>
#include <sstream>
#include <stdarg.h>

using namespace std;
using namespace gtsam;

#define PRINT false

class Sudoku: public CSP {

class Sudoku : public CSP {
/// sudoku size
size_t n_;

@ -27,25 +28,21 @@ class Sudoku: public CSP {
typedef std::pair<size_t, size_t> IJ;
std::map<IJ, DiscreteKey> dkeys_;

public:

 public:
/// return DiscreteKey for cell(i,j)
const DiscreteKey& dkey(size_t i, size_t j) const {
return dkeys_.at(IJ(i, j));
}

/// return Key for cell(i,j)
Key key(size_t i, size_t j) const {
return dkey(i, j).first;
}
Key key(size_t i, size_t j) const { return dkey(i, j).first; }

/// Constructor
Sudoku(size_t n, ...) :
n_(n) {
Sudoku(size_t n, ...) : n_(n) {
// Create variables, ordering, and unary constraints
va_list ap;
va_start(ap, n);
Key k=0;
Key k = 0;
for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < n; ++j, ++k) {
// create the key

@ -56,23 +53,21 @@ public:
// cout << value << " ";
if (value != 0) addSingleValue(dkeys_[ij], value - 1);
}
//cout << endl;
// cout << endl;
}
va_end(ap);

// add row constraints
for (size_t i = 0; i < n; i++) {
DiscreteKeys dkeys;
for (size_t j = 0; j < n; j++)
dkeys += dkey(i, j);
for (size_t j = 0; j < n; j++) dkeys += dkey(i, j);
addAllDiff(dkeys);
}

// add col constraints
for (size_t j = 0; j < n; j++) {
DiscreteKeys dkeys;
for (size_t i = 0; i < n; i++)
dkeys += dkey(i, j);
for (size_t i = 0; i < n; i++) dkeys += dkey(i, j);
addAllDiff(dkeys);
}

@ -84,8 +79,7 @@ public:
// Box I,J
DiscreteKeys dkeys;
for (size_t i = i0; i < i0 + N; i++)
for (size_t j = j0; j < j0 + N; j++)
dkeys += dkey(i, j);
for (size_t j = j0; j < j0 + N; j++) dkeys += dkey(i, j);
addAllDiff(dkeys);
j0 += N;
}

@ -109,74 +103,66 @@ public:
DiscreteFactor::sharedValues MPE = optimalAssignment();
printAssignment(MPE);
}

};

/* ************************************************************************* */
TEST_UNSAFE( Sudoku, small)
{
Sudoku csp(4,
1,0, 0,4,
0,0, 0,0,

4,0, 2,0,
0,1, 0,0);
TEST_UNSAFE(Sudoku, small) {
Sudoku csp(4, //
1, 0, 0, 4, //
0, 0, 0, 0, //
4, 0, 2, 0, //
0, 1, 0, 0);

// Do BP
csp.runArcConsistency(4,10,PRINT);
csp.runArcConsistency(4, 10, PRINT);

// optimize and check
CSP::sharedValues solution = csp.optimalAssignment();
CSP::Values expected;
insert(expected)
(csp.key(0,0), 0)(csp.key(0,1), 1)(csp.key(0,2), 2)(csp.key(0,3), 3)
(csp.key(1,0), 2)(csp.key(1,1), 3)(csp.key(1,2), 0)(csp.key(1,3), 1)
(csp.key(2,0), 3)(csp.key(2,1), 2)(csp.key(2,2), 1)(csp.key(2,3), 0)
(csp.key(3,0), 1)(csp.key(3,1), 0)(csp.key(3,2), 3)(csp.key(3,3), 2);
EXPECT(assert_equal(expected,*solution));
//csp.printAssignment(solution);
insert(expected)(csp.key(0, 0), 0)(csp.key(0, 1), 1)(csp.key(0, 2), 2)(
csp.key(0, 3), 3)(csp.key(1, 0), 2)(csp.key(1, 1), 3)(csp.key(1, 2), 0)(
csp.key(1, 3), 1)(csp.key(2, 0), 3)(csp.key(2, 1), 2)(csp.key(2, 2), 1)(
csp.key(2, 3), 0)(csp.key(3, 0), 1)(csp.key(3, 1), 0)(csp.key(3, 2), 3)(
csp.key(3, 3), 2);
EXPECT(assert_equal(expected, *solution));
// csp.printAssignment(solution);
}

/* ************************************************************************* */
TEST_UNSAFE( Sudoku, easy)
{
Sudoku sudoku(9,
0,0,5, 0,9,0, 0,0,1,
0,0,0, 0,0,2, 0,7,3,
7,6,0, 0,0,8, 2,0,0,
TEST_UNSAFE(Sudoku, easy) {
Sudoku sudoku(9, //
0, 0, 5, 0, 9, 0, 0, 0, 1, //
0, 0, 0, 0, 0, 2, 0, 7, 3, //
7, 6, 0, 0, 0, 8, 2, 0, 0, //

0,1,2, 0,0,9, 0,0,4,
0,0,0, 2,0,3, 0,0,0,
3,0,0, 1,0,0, 9,6,0,
0, 1, 2, 0, 0, 9, 0, 0, 4, //
0, 0, 0, 2, 0, 3, 0, 0, 0, //
3, 0, 0, 1, 0, 0, 9, 6, 0, //

0,0,1, 9,0,0, 0,5,8,
9,7,0, 5,0,0, 0,0,0,
5,0,0, 0,3,0, 7,0,0);
0, 0, 1, 9, 0, 0, 0, 5, 8, //
9, 7, 0, 5, 0, 0, 0, 0, 0, //
5, 0, 0, 0, 3, 0, 7, 0, 0);

// Do BP
sudoku.runArcConsistency(4,10,PRINT);
sudoku.runArcConsistency(4, 10, PRINT);

// sudoku.printSolution(); // don't do it
}

/* ************************************************************************* */
TEST_UNSAFE( Sudoku, extreme)
{
Sudoku sudoku(9,
0,0,9, 7,4,8, 0,0,0,
7,0,0, 0,0,0, 0,0,0,
0,2,0, 1,0,9, 0,0,0,

0,0,7, 0,0,0, 2,4,0,
0,6,4, 0,1,0, 5,9,0,
0,9,8, 0,0,0, 3,0,0,

0,0,0, 8,0,3, 0,2,0,
0,0,0, 0,0,0, 0,0,6,
0,0,0, 2,7,5, 9,0,0);
TEST_UNSAFE(Sudoku, extreme) {
Sudoku sudoku(9, //
0, 0, 9, 7, 4, 8, 0, 0, 0, //
7, 0, 0, 0, 0, 0, 0, 0, 0, //
0, 2, 0, 1, 0, 9, 0, 0, 0, //
0, 0, 7, 0, 0, 0, 2, 4, 0, //
0, 6, 4, 0, 1, 0, 5, 9, 0, //
0, 9, 8, 0, 0, 0, 3, 0, 0, //
0, 0, 0, 8, 0, 3, 0, 2, 0, //
0, 0, 0, 0, 0, 0, 0, 0, 6, //
0, 0, 0, 2, 7, 5, 9, 0, 0);

// Do BP
sudoku.runArcConsistency(9,10,PRINT);
sudoku.runArcConsistency(9, 10, PRINT);

#ifdef METIS
VariableIndexOrdered index(sudoku);

@ -185,29 +171,28 @@ TEST_UNSAFE( Sudoku, extreme)
index.outputMetisFormat(os);
#endif

//sudoku.printSolution(); // don't do it
// sudoku.printSolution(); // don't do it
}

/* ************************************************************************* */
TEST_UNSAFE( Sudoku, AJC_3star_Feb8_2012)
{
Sudoku sudoku(9,
9,5,0, 0,0,6, 0,0,0,
0,8,4, 0,7,0, 0,0,0,
6,2,0, 5,0,0, 4,0,0,
TEST_UNSAFE(Sudoku, AJC_3star_Feb8_2012) {
Sudoku sudoku(9, //
9, 5, 0, 0, 0, 6, 0, 0, 0, //
0, 8, 4, 0, 7, 0, 0, 0, 0, //
6, 2, 0, 5, 0, 0, 4, 0, 0, //

0,0,0, 2,9,0, 6,0,0,
0,9,0, 0,0,0, 0,2,0,
0,0,2, 0,6,3, 0,0,0,
0, 0, 0, 2, 9, 0, 6, 0, 0, //
0, 9, 0, 0, 0, 0, 0, 2, 0, //
0, 0, 2, 0, 6, 3, 0, 0, 0, //

0,0,9, 0,0,7, 0,6,8,
0,0,0, 0,3,0, 2,9,0,
0,0,0, 1,0,0, 0,3,7);
0, 0, 9, 0, 0, 7, 0, 6, 8, //
0, 0, 0, 0, 3, 0, 2, 9, 0, //
0, 0, 0, 1, 0, 0, 0, 3, 7);

// Do BP
sudoku.runArcConsistency(9,10,PRINT);
sudoku.runArcConsistency(9, 10, PRINT);

//sudoku.printSolution(); // don't do it
// sudoku.printSolution(); // don't do it
}

/* ************************************************************************* */

@ -216,4 +201,3 @@ int main() {
return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

@ -226,7 +226,7 @@ pair<QP, QP> testParser(QPSParser parser) {
expected.inequalities.add(X1, -I_1x1, 0, 2);  // x >= 0
expected.inequalities.add(X2, -I_1x1, 0, 3);  // y >= 0
return {expected, exampleqp};
};
}

TEST(QPSolver, ParserSyntaticTest) {
auto result = testParser(QPSParser("QPExample.QPS"));

@ -56,7 +56,8 @@
bool flag_bump_up_near_zero_probs_;

/** concept check by type */
GTSAM_CONCEPT_LIE_TYPE(T)GTSAM_CONCEPT_TESTABLE_TYPE(T)
GTSAM_CONCEPT_LIE_TYPE(T)
GTSAM_CONCEPT_TESTABLE_TYPE(T)

public:

@ -0,0 +1,40 @@
# SLAM Factors

## SmartFactors

These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras.
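
A minimal sketch of the pattern, using the existing `SmartProjectionPoseFactor`; the keys, calibration numbers, and noise values here are illustrative assumptions, not part of this PR:

```cpp
#include <gtsam/slam/SmartProjectionPoseFactor.h>
#include <gtsam/inference/Symbol.h>

void smartFactorSketch() {
  auto pixelNoise = gtsam::noiseModel::Isotropic::Sigma(2, 1.0);  // 1 px
  auto K = boost::make_shared<gtsam::Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  gtsam::SmartProjectionPoseFactor<gtsam::Cal3_S2> smartFactor(pixelNoise, K);
  // one measurement per view of the same (implicit) landmark:
  smartFactor.add(gtsam::Point2(210, 150), gtsam::Symbol('x', 1));
  smartFactor.add(gtsam::Point2(190, 152), gtsam::Symbol('x', 2));
}
```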

### SmartRangeFactor

An experiment in creating a structure-less 2D range-SLAM factor with range-only measurements.
It uses a sophisticated `triangulate` logic based on circle intersections.
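
A sketch of the intended use, assuming the `gtsam_unstable` API (constructor taking the range sigma, `addRange` tying each `Pose2` key to the same implicit landmark):

```cpp
#include <gtsam_unstable/slam/SmartRangeFactor.h>
#include <gtsam/inference/Symbol.h>

void smartRangeSketch() {
  gtsam::SmartRangeFactor rangeFactor(0.1);          // sigma (assumed meters)
  rangeFactor.addRange(gtsam::Symbol('x', 1), 2.0);  // range seen from pose x1
  rangeFactor.addRange(gtsam::Symbol('x', 2), 3.0);  // range from x2, same landmark
}
```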

### SmartStereoProjectionFactor

Version of `SmartProjectionFactor` for stereo observations, specializes `SmartFactorBase` for `CAMERA == StereoCamera`.

TODO: a lot of commented out code and could move a lot to .cpp file.

### SmartStereoProjectionPoseFactor

Derives from `SmartStereoProjectionFactor` but adds an array of `Cal3_S2Stereo` calibration objects.
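
A hedged usage sketch; the constructor and `add` signatures below are assumed, not verified against this PR:

```cpp
#include <gtsam/slam/SmartStereoProjectionPoseFactor.h>  // path assumed
#include <gtsam/inference/Symbol.h>

void stereoPoseFactorSketch(const gtsam::SharedNoiseModel& model,
                            const gtsam::SmartStereoProjectionParams& params) {
  auto K = boost::make_shared<gtsam::Cal3_S2Stereo>(500, 500, 0, 320, 240, 0.1);
  gtsam::SmartStereoProjectionPoseFactor factor(model, params);
  // (uL, uR, v) stereo pixel observed from pose x1, with its calibration:
  factor.add(gtsam::StereoPoint2(210, 190, 150), gtsam::Symbol('x', 1), K);
}
```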

TODO: Again, as there are no template arguments, we could move a lot to the .cpp file.

### SmartStereoProjectionFactorPP

Similar to `SmartStereoProjectionPoseFactor`, but it *additionally* adds an array of body_P_cam poses. The dimensions seem to be hardcoded and the types defined in the SmartFactorBase have been re-defined.
The body_P_cam poses are optimized here!

TODO: See above, same issues as `SmartStereoProjectionPoseFactor`.

### SmartProjectionPoseFactorRollingShutter

This factor is templated on a `CAMERA` type and derives from `SmartProjectionFactor`.

This factor optimizes two consecutive poses of a body assuming a rolling
shutter model of the camera with given readout time. The factor requires that
values contain (for each 2D observation) two consecutive camera poses from
which the 2D observation pose can be interpolated.
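
A minimal sketch using the constructor and `add()` overload introduced in this PR (a camera rig with fixed extrinsics/intrinsics, plus two pose keys and an interpolation factor per measurement; the pixel and factor values are illustrative):

```cpp
#include <gtsam_unstable/slam/SmartProjectionPoseFactorRollingShutter.h>  // path assumed

using Camera = gtsam::PinholePose<gtsam::Cal3_S2>;
using Cameras = gtsam::CameraSet<Camera>;

void rollingShutterSketch(const gtsam::SharedNoiseModel& model,
                          const gtsam::Pose3& body_P_cam,
                          const gtsam::Cal3_S2::shared_ptr& K,
                          gtsam::Key x1, gtsam::Key x2) {
  boost::shared_ptr<Cameras> cameraRig(new Cameras());
  cameraRig->push_back(Camera(body_P_cam, K));  // fixed extrinsics + intrinsics
  gtsam::SmartProjectionParams params(gtsam::HESSIAN, gtsam::ZERO_ON_DEGENERACY);
  gtsam::SmartProjectionPoseFactorRollingShutter<Camera> factor(model, cameraRig, params);
  // pixel acquired 30% of the way between the body poses at keys x1 and x2:
  factor.add(gtsam::Point2(320, 240), x1, x2, 0.3);  // cameraId defaults to 0
}
```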

TODO: the dimensions seem to be hardcoded and the types defined in the SmartFactorBase have been re-defined. Also, possibly a lot of copy/paste computation of things that (should) happen in base class.
@ -43,13 +43,12 @@ namespace gtsam {
template <class CAMERA>
class SmartProjectionPoseFactorRollingShutter
: public SmartProjectionFactor<CAMERA> {
public:
 private:
typedef SmartProjectionFactor<CAMERA> Base;
typedef SmartProjectionPoseFactorRollingShutter<CAMERA> This;
typedef typename CAMERA::CalibrationType CALIBRATION;

protected:
/// shared pointer to calibration object (one for each observation)
std::vector<boost::shared_ptr<CALIBRATION>> K_all_;

/// The keys of the pose of the body (with respect to an external world
/// frame): two consecutive poses for each observation
std::vector<std::pair<Key, Key>> world_P_body_key_pairs_;

@ -58,17 +57,19 @@ class SmartProjectionPoseFactorRollingShutter
/// pair of consecutive poses
std::vector<double> alphas_;

/// Pose of the camera in the body frame
std::vector<Pose3> body_P_sensors_;
/// one or more cameras taking observations (fixed poses wrt body + fixed
/// intrinsics)
boost::shared_ptr<typename Base::Cameras> cameraRig_;

/// vector of camera Ids (one for each observation, in the same order),
/// identifying which camera took the measurement
FastVector<size_t> cameraIds_;

public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW

/// shorthand for base class type
typedef SmartProjectionFactor<PinholePose<CALIBRATION>> Base;

/// shorthand for this class
typedef SmartProjectionPoseFactorRollingShutter This;
typedef CAMERA Camera;
typedef CameraSet<CAMERA> Cameras;

/// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<This> shared_ptr;

@ -83,22 +84,37 @@ class SmartProjectionPoseFactorRollingShutter
typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD>>
FBlocks;  // vector of F blocks

/// Default constructor, only for serialization
SmartProjectionPoseFactorRollingShutter() {}

/**
* Constructor
* @param Isotropic measurement noise
* @param cameraRig set of cameras (fixed poses wrt body and intrinsics)
* taking the measurements
* @param params internal parameters of the smart factors
*/
SmartProjectionPoseFactorRollingShutter(
const SharedNoiseModel& sharedNoiseModel,
const boost::shared_ptr<Cameras>& cameraRig,
const SmartProjectionParams& params = SmartProjectionParams())
: Base(sharedNoiseModel, params) {}
: Base(sharedNoiseModel, params), cameraRig_(cameraRig) {
// throw exception if configuration is not supported by this factor
if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"degeneracyMode must be set to ZERO_ON_DEGENERACY");
if (Base::params_.linearizationMode != gtsam::HESSIAN)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"linearizationMode must be set to HESSIAN");
}

/** Virtual destructor */
~SmartProjectionPoseFactorRollingShutter() override = default;

/**
* add a new measurement, with 2 pose keys, interpolation factor, camera
* (intrinsic and extrinsic) calibration, and observed pixel.
* add a new measurement, with 2 pose keys, interpolation factor, and cameraId
* @param measured 2-dimensional location of the projection of a single
* landmark in a single view (the measurement), interpolated from the 2 poses
* @param world_P_body_key1 key corresponding to the first body poses (time <=

@ -107,13 +123,11 @@ class SmartProjectionPoseFactorRollingShutter
* >= time pixel is acquired)
* @param alpha interpolation factor in [0,1], such that if alpha = 0 the
* interpolated pose is the same as world_P_body_key1
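* (For reference: gtsam::interpolate<Pose3> computes
* T(alpha) = T1 * Exp(alpha * Log(T1^{-1} * T2)) on SE(3), so alpha = 0
* yields the pose at world_P_body_key1 and alpha = 1 the pose at
* world_P_body_key2.)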
* @param K (fixed) camera intrinsic calibration
* @param body_P_sensor (fixed) camera extrinsic calibration
* @param cameraId ID of the camera taking the measurement (default 0)
*/
void add(const Point2& measured, const Key& world_P_body_key1,
const Key& world_P_body_key2, const double& alpha,
const boost::shared_ptr<CALIBRATION>& K,
const Pose3& body_P_sensor = Pose3::identity()) {
const size_t& cameraId = 0) {
// store measurements in base class
this->measured_.push_back(measured);

@ -133,11 +147,8 @@ class SmartProjectionPoseFactorRollingShutter
// store interpolation factor
alphas_.push_back(alpha);

// store fixed intrinsic calibration
K_all_.push_back(K);

// store fixed extrinsics of the camera
body_P_sensors_.push_back(body_P_sensor);
// store id of the camera taking the measurement
cameraIds_.push_back(cameraId);
}

/**

@ -150,56 +161,36 @@ class SmartProjectionPoseFactorRollingShutter
* for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order)
* @param Ks vector of (fixed) intrinsic calibration objects
* @param body_P_sensors vector of (fixed) extrinsic calibration objects
* @param cameraIds IDs of the cameras taking each measurement (same order as
* the measurements)
*/
void add(const Point2Vector& measurements,
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas,
const std::vector<boost::shared_ptr<CALIBRATION>>& Ks,
const std::vector<Pose3>& body_P_sensors) {
assert(world_P_body_key_pairs.size() == measurements.size());
assert(world_P_body_key_pairs.size() == alphas.size());
assert(world_P_body_key_pairs.size() == Ks.size());
const FastVector<size_t>& cameraIds = FastVector<size_t>()) {
if (world_P_body_key_pairs.size() != measurements.size() ||
world_P_body_key_pairs.size() != alphas.size() ||
(world_P_body_key_pairs.size() != cameraIds.size() &&
cameraIds.size() != 0)) {  // cameraIds.size()=0 is default
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"trying to add inconsistent inputs");
}
if (cameraIds.size() == 0 && cameraRig_->size() > 1) {
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"camera rig includes multiple camera "
"but add did not input cameraIds");
}
for (size_t i = 0; i < measurements.size(); i++) {
add(measurements[i], world_P_body_key_pairs[i].first,
world_P_body_key_pairs[i].second, alphas[i], Ks[i],
body_P_sensors[i]);
world_P_body_key_pairs[i].second, alphas[i],
cameraIds.size() == 0 ? 0
: cameraIds[i]);  // use 0 as default if
// cameraIds was not specified
}
}

/**
* Variant of the previous "add" function in which we include multiple
* measurements with the same (intrinsic and extrinsic) calibration
* @param measurements vector of the 2m dimensional location of the projection
* of a single landmark in the m views (the measurements)
* @param world_P_body_key_pairs vector where the i-th element contains a pair
* of keys corresponding to the pair of poses from which the observation pose
* for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order)
* @param K (fixed) camera intrinsic calibration (same for all measurements)
* @param body_P_sensor (fixed) camera extrinsic calibration (same for all
* measurements)
*/
void add(const Point2Vector& measurements,
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas,
const boost::shared_ptr<CALIBRATION>& K,
const Pose3& body_P_sensor = Pose3::identity()) {
assert(world_P_body_key_pairs.size() == measurements.size());
assert(world_P_body_key_pairs.size() == alphas.size());
for (size_t i = 0; i < measurements.size(); i++) {
add(measurements[i], world_P_body_key_pairs[i].first,
world_P_body_key_pairs[i].second, alphas[i], K, body_P_sensor);
}
}

/// return the calibration object
const std::vector<boost::shared_ptr<CALIBRATION>>& calibration() const {
return K_all_;
}

/// return (for each observation) the keys of the pair of poses from which we
/// interpolate
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs() const {

@ -209,8 +200,11 @@ class SmartProjectionPoseFactorRollingShutter
/// return the interpolation factors alphas
const std::vector<double>& alphas() const { return alphas_; }

/// return the extrinsic camera calibration body_P_sensors
const std::vector<Pose3>& body_P_sensors() const { return body_P_sensors_; }
/// return the calibration object
const boost::shared_ptr<Cameras>& cameraRig() const { return cameraRig_; }

/// return the camera Ids (one per measurement)
const FastVector<size_t>& cameraIds() const { return cameraIds_; }

/**
* print

@ -221,15 +215,15 @@ class SmartProjectionPoseFactorRollingShutter
const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n ";
for (size_t i = 0; i < K_all_.size(); i++) {
for (size_t i = 0; i < cameraIds_.size(); i++) {
std::cout << "-- Measurement nr " << i << std::endl;
std::cout << " pose1 key: "
<< keyFormatter(world_P_body_key_pairs_[i].first) << std::endl;
std::cout << " pose2 key: "
<< keyFormatter(world_P_body_key_pairs_[i].second) << std::endl;
std::cout << " alpha: " << alphas_[i] << std::endl;
body_P_sensors_[i].print("extrinsic calibration:\n");
K_all_[i]->print("intrinsic calibration = ");
std::cout << "cameraId: " << cameraIds_[i] << std::endl;
(*cameraRig_)[cameraIds_[i]].print("camera in rig:\n");
}
Base::print("", keyFormatter);
}

@ -257,20 +251,48 @@ class SmartProjectionPoseFactorRollingShutter
keyPairsEqual = false;
}

double extrinsicCalibrationEqual = true;
if (this->body_P_sensors_.size() == e->body_P_sensors().size()) {
for (size_t i = 0; i < this->body_P_sensors_.size(); i++) {
if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])) {
extrinsicCalibrationEqual = false;
break;
}
}
} else {
extrinsicCalibrationEqual = false;
}
return e && Base::equals(p, tol) && alphas_ == e->alphas() &&
keyPairsEqual && cameraRig_->equals(*(e->cameraRig())) &&
std::equal(cameraIds_.begin(), cameraIds_.end(),
e->cameraIds().begin());
}

return e && Base::equals(p, tol) && K_all_ == e->calibration() &&
alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual;
/**
* Collect all cameras involved in this factor
* @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor
* @return Cameras
*/
typename Base::Cameras cameras(const Values& values) const override {
typename Base::Cameras cameras;
for (size_t i = 0; i < this->measured_.size();
i++) {  // for each measurement
const Pose3& w_P_body1 =
values.at<Pose3>(world_P_body_key_pairs_[i].first);
const Pose3& w_P_body2 =
values.at<Pose3>(world_P_body_key_pairs_[i].second);
double interpolationFactor = alphas_[i];
const Pose3& w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor);
const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
const Pose3& body_P_cam = camera_i.pose();
const Pose3& w_P_cam = w_P_body.compose(body_P_cam);
cameras.emplace_back(w_P_cam,
make_shared<typename CAMERA::CalibrationType>(
camera_i.calibration()));
}
return cameras;
}

/**
* error calculates the error of the factor.
*/
double error(const Values& values) const override {
if (this->active(values)) {
return this->totalReprojectionError(this->cameras(values));
} else {  // else of active flag
return 0.0;
}
}

/**

@ -305,9 +327,10 @@ class SmartProjectionPoseFactorRollingShutter
auto w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor,
dInterpPose_dPoseBody1, dInterpPose_dPoseBody2);
auto body_P_cam = body_P_sensors_[i];
const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
auto body_P_cam = camera_i.pose();
auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose);
PinholeCamera<CALIBRATION> camera(w_P_cam, *K_all_[i]);
PinholeCamera<CALIBRATION> camera(w_P_cam, camera_i.calibration());

// get jacobians and error vector for current measurement
Point2 reprojectionError_i =

@ -332,7 +355,7 @@ class SmartProjectionPoseFactorRollingShutter

/// linearize and return a HessianFactor that is an approximation of error(p)
boost::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor(
const Values& values, const double lambda = 0.0,
const Values& values, const double& lambda = 0.0,
bool diagonalDamping = false) const {
// we may have multiple observation sharing the same keys (due to the
// rolling shutter interpolation), hence the number of unique keys may be

@ -341,19 +364,21 @@ class SmartProjectionPoseFactorRollingShutter
this->keys_
.size();  // note: by construction, keys_ only contains unique keys

typename Base::Cameras cameras = this->cameras(values);

// Create structures for Hessian Factors
KeyVector js;
std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
std::vector<Vector> gs(nrUniqueKeys);

if (this->measured_.size() !=
this->cameras(values).size())  // 1 observation per interpolated camera
cameras.size())  // 1 observation per interpolated camera
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"measured_.size() inconsistent with input");

// triangulate 3D point at given linearization point
this->triangulateSafe(this->cameras(values));
this->triangulateSafe(cameras);

if (!this->result_) {  // failed: return "empty/zero" Hessian
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {

@ -399,46 +424,6 @@ class SmartProjectionPoseFactorRollingShutter
this->keys_, augmentedHessianUniqueKeys);
}

/**
* error calculates the error of the factor.
*/
double error(const Values& values) const override {
if (this->active(values)) {
return this->totalReprojectionError(this->cameras(values));
} else {  // else of active flag
return 0.0;
}
}

/**
* Collect all cameras involved in this factor
* @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor
* @return Cameras
*/
typename Base::Cameras cameras(const Values& values) const override {
size_t numViews = this->measured_.size();
assert(numViews == K_all_.size());
assert(numViews == alphas_.size());
assert(numViews == body_P_sensors_.size());
assert(numViews == world_P_body_key_pairs_.size());

typename Base::Cameras cameras;
for (size_t i = 0; i < numViews; i++) {  // for each measurement
const Pose3& w_P_body1 =
values.at<Pose3>(world_P_body_key_pairs_[i].first);
const Pose3& w_P_body2 =
values.at<Pose3>(world_P_body_key_pairs_[i].second);
double interpolationFactor = alphas_[i];
const Pose3& w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor);
const Pose3& body_P_cam = body_P_sensors_[i];
const Pose3& w_P_cam = w_P_body.compose(body_P_cam);
cameras.emplace_back(w_P_cam, K_all_[i]);
}
return cameras;
}

/**
* Linearize to Gaussian Factor (possibly adding a damping factor Lambda for
* LM)

@ -447,7 +432,7 @@ class SmartProjectionPoseFactorRollingShutter
* @return a Gaussian factor
*/
boost::shared_ptr<GaussianFactor> linearizeDamped(
const Values& values, const double lambda = 0.0) const {
const Values& values, const double& lambda = 0.0) const {
// depending on flag set on construction we may linearize to different
// linear factors
switch (this->params_.linearizationMode) {

@ -455,8 +440,8 @@ class SmartProjectionPoseFactorRollingShutter
return this->createHessianFactor(values, lambda);
default:
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: unknown linearization "
"mode");
"SmartProjectionPoseFactorRollingShutter: "
"unknown linearization mode");
}
}

@ -472,7 +457,6 @@ class SmartProjectionPoseFactorRollingShutter
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar& BOOST_SERIALIZATION_NVP(K_all_);
}
};
// end of class declaration

@ -11,7 +11,7 @@

/**
* @file SmartStereoProjectionFactor.h
* @brief Smart stereo factor on StereoCameras (pose + calibration)
* @brief Smart stereo factor on StereoCameras (pose)
* @author Luca Carlone
* @author Zsolt Kira
* @author Frank Dellaert

@ -447,23 +447,23 @@ public:
}

/**
* This corrects the Jacobians and error vector for the case in which the right pixel in the monocular camera is missing (nan)
* This corrects the Jacobians and error vector for the case in which the
* right 2D measurement in the monocular camera is missing (nan).
*/
void correctForMissingMeasurements(const Cameras& cameras, Vector& ue,
boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
boost::optional<Matrix&> E = boost::none) const override
{
void correctForMissingMeasurements(
const Cameras& cameras, Vector& ue,
boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
boost::optional<Matrix&> E = boost::none) const override {
// when using stereo cameras, some of the measurements might be missing:
for(size_t i=0; i < cameras.size(); i++){
for (size_t i = 0; i < cameras.size(); i++) {
const StereoPoint2& z = measured_.at(i);
if(std::isnan(z.uR())) // if the right pixel is invalid
if (std::isnan(z.uR()))  // if the right 2D measurement is invalid
{
if(Fs){ // delete influence of right point on jacobian Fs
if (Fs) {  // delete influence of right point on jacobian Fs
MatrixZD& Fi = Fs->at(i);
for(size_t ii=0; ii<Dim; ii++)
Fi(1,ii) = 0.0;
for (size_t ii = 0; ii < Dim; ii++) Fi(1, ii) = 0.0;
}
if(E) // delete influence of right point on jacobian E
if (E)  // delete influence of right point on jacobian E
E->row(ZDim * i + 1) = Matrix::Zero(1, E->cols());

// set the corresponding entry of vector ue to zero

@ -33,9 +33,11 @@ namespace gtsam {
*/

/**
* This factor optimizes the pose of the body as well as the extrinsic camera calibration (pose of camera wrt body).
* Each camera may have its own extrinsic calibration or the same calibration can be shared by multiple cameras.
* This factor requires that values contain the involved poses and extrinsics (both are Pose3 variables).
* This factor optimizes the pose of the body as well as the extrinsic camera
* calibration (pose of camera wrt body). Each camera may have its own extrinsic
* calibration or the same calibration can be shared by multiple cameras. This
* factor requires that values contain the involved poses and extrinsics (both
* are Pose3 variables).
* @addtogroup SLAM
*/
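// Hypothetical usage sketch (signatures assumed, not verified against this
// header): one body-pose key and one extrinsic key per measurement.
//   SmartStereoProjectionFactorPP factor(model, params);
//   factor.add(StereoPoint2(uL, uR, v), poseKey, extrinsicKey, K);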
class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
@ -61,10 +61,13 @@ static double interp_factor1 = 0.3;
static double interp_factor2 = 0.4;
static double interp_factor3 = 0.5;

static size_t cameraId1 = 0;

/* ************************************************************************* */
// default Cal3_S2 poses with rolling shutter effect
namespace vanillaPoseRS {
typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h));
Pose3 interp_pose1 = interpolate<Pose3>(level_pose, pose_right, interp_factor1);
Pose3 interp_pose2 = interpolate<Pose3>(pose_right, pose_above, interp_factor2);

@ -72,6 +75,9 @@ Pose3 interp_pose3 = interpolate<Pose3>(pose_above, level_pose, interp_factor3);
Camera cam1(interp_pose1, sharedK);
Camera cam2(interp_pose2, sharedK);
Camera cam3(interp_pose3, sharedK);
SmartProjectionParams params(
gtsam::HESSIAN,
gtsam::ZERO_ON_DEGENERACY);  // only config that works with RS factors
}  // namespace vanillaPoseRS

LevenbergMarquardtParams lmParams;

@ -80,26 +86,35 @@ typedef SmartProjectionPoseFactorRollingShutter<PinholePose<Cal3_S2>>

/* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Constructor) {
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
using namespace vanillaPoseRS;
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
}

/* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) {
SmartProjectionParams params;
using namespace vanillaPoseRS;
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
params.setRankTolerance(rankTol);
SmartFactorRS factor1(model, params);
SmartFactorRS factor1(model, cameraRig, params);
}

/* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, add) {
using namespace vanillaPose;
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
factor1->add(measurement1, x1, x2, interp_factor, sharedK, body_P_sensor);
using namespace vanillaPoseRS;
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement1, x1, x2, interp_factor);
}

/* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Equals) {
using namespace vanillaPose;
using namespace vanillaPoseRS;

// create fake measurements
Point2Vector measurements;

@ -112,68 +127,88 @@ TEST(SmartProjectionPoseFactorRollingShutter, Equals) {
key_pairs.push_back(std::make_pair(x2, x3));
key_pairs.push_back(std::make_pair(x3, x4));

std::vector<boost::shared_ptr<Cal3_S2>> intrinsicCalibrations;
intrinsicCalibrations.push_back(sharedK);
intrinsicCalibrations.push_back(sharedK);
intrinsicCalibrations.push_back(sharedK);

std::vector<Pose3> extrinsicCalibrations;
extrinsicCalibrations.push_back(body_P_sensor);
extrinsicCalibrations.push_back(body_P_sensor);
extrinsicCalibrations.push_back(body_P_sensor);

std::vector<double> interp_factors;
interp_factors.push_back(interp_factor1);
interp_factors.push_back(interp_factor2);
interp_factors.push_back(interp_factor3);

FastVector<size_t> cameraIds{0, 0, 0};

boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(body_P_sensor, sharedK));

// create by adding a batch of measurements with a bunch of calibrations
SmartFactorRS::shared_ptr factor2(new SmartFactorRS(model));
factor2->add(measurements, key_pairs, interp_factors, intrinsicCalibrations,
extrinsicCalibrations);
SmartFactorRS::shared_ptr factor2(
new SmartFactorRS(model, cameraRig, params));
factor2->add(measurements, key_pairs, interp_factors, cameraIds);

// create by adding a batch of measurements with a single calibrations
SmartFactorRS::shared_ptr factor3(new SmartFactorRS(model));
factor3->add(measurements, key_pairs, interp_factors, sharedK, body_P_sensor);
SmartFactorRS::shared_ptr factor3(
new SmartFactorRS(model, cameraRig, params));
factor3->add(measurements, key_pairs, interp_factors, cameraIds);

{ // create equal factors and show equal returns true
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor);
factor1->add(measurement2, x2, x3, interp_factor2, sharedK, body_P_sensor);
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor);
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
factor1->add(measurement2, x2, x3, interp_factor2, cameraId1);
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);

EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3));
}
{ // create equal factors and show equal returns true (use default cameraId)
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement1, x1, x2, interp_factor1);
factor1->add(measurement2, x2, x3, interp_factor2);
factor1->add(measurement3, x3, x4, interp_factor3);

EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3));
}
{ // create equal factors and show equal returns true (use default cameraId)
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurements, key_pairs, interp_factors);

EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3));
}
{ // create slightly different factors (different keys) and show equal
// returns false
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor);
factor1->add(measurement2, x2, x2, interp_factor2, sharedK,
body_P_sensor);  // different!
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor);
// returns false (use default cameraIds)
|
||||
SmartFactorRS::shared_ptr factor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
|
||||
factor1->add(measurement2, x2, x2, interp_factor2,
|
||||
cameraId1); // different!
|
||||
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
|
||||
|
||||
EXPECT(!factor1->equals(*factor2));
|
||||
EXPECT(!factor1->equals(*factor3));
|
||||
}
|
||||
{ // create slightly different factors (different extrinsics) and show equal
|
||||
// returns false
|
||||
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
|
||||
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor);
|
||||
factor1->add(measurement2, x2, x3, interp_factor2, sharedK,
|
||||
body_P_sensor * body_P_sensor); // different!
|
||||
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor);
|
||||
boost::shared_ptr<Cameras> cameraRig2(new Cameras());
|
||||
cameraRig2->push_back(Camera(body_P_sensor * body_P_sensor, sharedK));
|
||||
SmartFactorRS::shared_ptr factor1(
|
||||
new SmartFactorRS(model, cameraRig2, params));
|
||||
factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
|
||||
factor1->add(measurement2, x2, x3, interp_factor2,
|
||||
cameraId1); // different!
|
||||
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
|
||||
|
||||
EXPECT(!factor1->equals(*factor2));
|
||||
EXPECT(!factor1->equals(*factor3));
|
||||
}
|
||||
{ // create slightly different factors (different interp factors) and show
|
||||
// equal returns false
|
||||
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model));
|
||||
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor);
|
||||
factor1->add(measurement2, x2, x3, interp_factor1, sharedK,
|
||||
body_P_sensor); // different!
|
||||
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor);
|
||||
SmartFactorRS::shared_ptr factor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
|
||||
factor1->add(measurement2, x2, x3, interp_factor1,
|
||||
cameraId1); // different!
|
||||
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
|
||||
|
||||
EXPECT(!factor1->equals(*factor2));
|
||||
EXPECT(!factor1->equals(*factor3));
|
||||
|
@ -197,9 +232,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noiselessErrorAndJacobians) {
|
|||
Point2 level_uv_right = cam2.project(landmark1);
|
||||
Pose3 body_P_sensorId = Pose3::identity();
|
||||
|
||||
SmartFactorRS factor(model);
|
||||
factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorId);
|
||||
factor.add(level_uv_right, x2, x3, interp_factor2, sharedK, body_P_sensorId);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_P_sensorId, sharedK));
|
||||
|
||||
SmartFactorRS factor(model, cameraRig, params);
|
||||
factor.add(level_uv, x1, x2, interp_factor1);
|
||||
factor.add(level_uv_right, x2, x3, interp_factor2);
|
||||
|
||||
Values values; // it's a pose factor, hence these are poses
|
||||
values.insert(x1, level_pose);
|
||||
|
@ -272,10 +310,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noisyErrorAndJacobians) {
|
|||
Point2 level_uv_right = cam2.project(landmark1);
|
||||
Pose3 body_P_sensorNonId = body_P_sensor;
|
||||
|
||||
SmartFactorRS factor(model);
|
||||
factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorNonId);
|
||||
factor.add(level_uv_right, x2, x3, interp_factor2, sharedK,
|
||||
body_P_sensorNonId);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_P_sensorNonId, sharedK));
|
||||
|
||||
SmartFactorRS factor(model, cameraRig, params);
|
||||
factor.add(level_uv, x1, x2, interp_factor1);
|
||||
factor.add(level_uv_right, x2, x3, interp_factor2);
|
||||
|
||||
Values values; // it's a pose factor, hence these are poses
|
||||
values.insert(x1, level_pose);
|
||||
|
@ -364,14 +404,20 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) {
|
|||
interp_factors.push_back(interp_factor2);
|
||||
interp_factors.push_back(interp_factor3);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor2(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
|
@ -411,6 +457,170 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) {
|
|||
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-6));
|
||||
}
|
||||
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam) {
|
||||
using namespace vanillaPoseRS;
|
||||
Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3;
|
||||
|
||||
// Project three landmarks into three cameras
|
||||
projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_lmk1);
|
||||
projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_lmk2);
|
||||
projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_lmk3);
|
||||
|
||||
// create inputs
|
||||
std::vector<std::pair<Key, Key>> key_pairs;
|
||||
key_pairs.push_back(std::make_pair(x1, x2));
|
||||
key_pairs.push_back(std::make_pair(x2, x3));
|
||||
key_pairs.push_back(std::make_pair(x3, x1));
|
||||
|
||||
std::vector<double> interp_factors;
|
||||
interp_factors.push_back(interp_factor1);
|
||||
interp_factors.push_back(interp_factor2);
|
||||
interp_factors.push_back(interp_factor3);
|
||||
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_P_sensor, sharedK));
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {1, 1, 1});
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor2(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {1, 1, 1});
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {1, 1, 1});
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
graph.push_back(smartFactor2);
|
||||
graph.push_back(smartFactor3);
|
||||
graph.addPrior(x1, level_pose, noisePrior);
|
||||
graph.addPrior(x2, pose_right, noisePrior);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, level_pose);
|
||||
groundTruth.insert(x2, pose_right);
|
||||
groundTruth.insert(x3, pose_above); // pose above is the pose of the camera
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
// Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10),
|
||||
// Point3(0.5,0.1,0.3)); // noise from regular projection factor test below
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
|
||||
Point3(0.1, 0.1, 0.1)); // smaller noise
|
||||
Values values;
|
||||
values.insert(x1, level_pose);
|
||||
values.insert(x2, pose_right);
|
||||
// initialize third pose with some noise, we expect it to move back to
|
||||
// original pose_above
|
||||
values.insert(x3, pose_above * noise_pose);
|
||||
EXPECT( // check that the pose is actually noisy
|
||||
assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656,
|
||||
-0.0313952598, -0.000986635786, 0.0314107591,
|
||||
-0.999013364, -0.0313952598),
|
||||
Point3(0.1, -0.1, 1.9)),
|
||||
values.at<Pose3>(x3)));
|
||||
|
||||
Values result;
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
|
||||
result = optimizer.optimize();
|
||||
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-6));
|
||||
}
|
||||
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam2) {
|
||||
using namespace vanillaPoseRS;
|
||||
|
||||
Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3;
|
||||
|
||||
// create arbitrary body_T_sensor (transforms from sensor to body)
|
||||
Pose3 body_T_sensor1 = Pose3(Rot3::Ypr(-0.03, 0., 0.01), Point3(1, 1, 1));
|
||||
Pose3 body_T_sensor2 = Pose3(Rot3::Ypr(-0.1, 0., 0.05), Point3(0, 0, 1));
|
||||
Pose3 body_T_sensor3 = Pose3(Rot3::Ypr(-0.3, 0., -0.05), Point3(0, 1, 1));
|
||||
|
||||
Camera camera1(interp_pose1 * body_T_sensor1, sharedK);
|
||||
Camera camera2(interp_pose2 * body_T_sensor2, sharedK);
|
||||
Camera camera3(interp_pose3 * body_T_sensor3, sharedK);
|
||||
|
||||
// Project three landmarks into three cameras
|
||||
projectToMultipleCameras(camera1, camera2, camera3, landmark1,
|
||||
measurements_lmk1);
|
||||
projectToMultipleCameras(camera1, camera2, camera3, landmark2,
|
||||
measurements_lmk2);
|
||||
projectToMultipleCameras(camera1, camera2, camera3, landmark3,
|
||||
measurements_lmk3);
|
||||
|
||||
// create inputs
|
||||
std::vector<std::pair<Key, Key>> key_pairs;
|
||||
key_pairs.push_back(std::make_pair(x1, x2));
|
||||
key_pairs.push_back(std::make_pair(x2, x3));
|
||||
key_pairs.push_back(std::make_pair(x3, x1));
|
||||
|
||||
std::vector<double> interp_factors;
|
||||
interp_factors.push_back(interp_factor1);
|
||||
interp_factors.push_back(interp_factor2);
|
||||
interp_factors.push_back(interp_factor3);
|
||||
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_T_sensor1, sharedK));
|
||||
cameraRig->push_back(Camera(body_T_sensor2, sharedK));
|
||||
cameraRig->push_back(Camera(body_T_sensor3, sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {0, 1, 2});
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor2(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {0, 1, 2});
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {0, 1, 2});
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
NonlinearFactorGraph graph;
|
||||
graph.push_back(smartFactor1);
|
||||
graph.push_back(smartFactor2);
|
||||
graph.push_back(smartFactor3);
|
||||
graph.addPrior(x1, level_pose, noisePrior);
|
||||
graph.addPrior(x2, pose_right, noisePrior);
|
||||
|
||||
Values groundTruth;
|
||||
groundTruth.insert(x1, level_pose);
|
||||
groundTruth.insert(x2, pose_right);
|
||||
groundTruth.insert(x3, pose_above); // pose above is the pose of the camera
|
||||
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
|
||||
|
||||
// Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10),
|
||||
// Point3(0.5,0.1,0.3)); // noise from regular projection factor test below
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
|
||||
Point3(0.1, 0.1, 0.1)); // smaller noise
|
||||
Values values;
|
||||
values.insert(x1, level_pose);
|
||||
values.insert(x2, pose_right);
|
||||
// initialize third pose with some noise, we expect it to move back to
|
||||
// original pose_above
|
||||
values.insert(x3, pose_above * noise_pose);
|
||||
EXPECT( // check that the pose is actually noisy
|
||||
assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656,
|
||||
-0.0313952598, -0.000986635786, 0.0314107591,
|
||||
-0.999013364, -0.0313952598),
|
||||
Point3(0.1, -0.1, 1.9)),
|
||||
values.at<Pose3>(x3)));
|
||||
|
||||
Values result;
|
||||
LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
|
||||
result = optimizer.optimize();
|
||||
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-4));
|
||||
}
|
||||
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
|
||||
// here we replicate a test in SmartProjectionPoseFactor by setting
|
||||
|
@ -418,7 +628,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
|
|||
// falls back to standard pixel measurements) Note: this is a quite extreme
|
||||
// test since in typical camera you would not have more than 1 measurement per
|
||||
// landmark at each interpolated pose
|
||||
using namespace vanillaPose;
|
||||
using namespace vanillaPoseRS;
|
||||
|
||||
// Default cameras for simple derivatives
|
||||
static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0));
|
||||
|
@ -438,15 +648,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
|
|||
measurements_lmk1.push_back(cam1.project(landmark1));
|
||||
measurements_lmk1.push_back(cam2.project(landmark1));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
|
||||
double interp_factor = 0; // equivalent to measurement taken at pose 1
|
||||
smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor, sharedKSimple,
|
||||
body_P_sensorId);
|
||||
interp_factor = 1; // equivalent to measurement taken at pose 2
|
||||
smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor, sharedKSimple,
|
||||
body_P_sensorId);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple));
|
||||
|
||||
SmartFactor::Cameras cameras;
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
double interp_factor = 0; // equivalent to measurement taken at pose 1
|
||||
smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor);
|
||||
interp_factor = 1; // equivalent to measurement taken at pose 2
|
||||
smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor);
|
||||
|
||||
SmartFactorRS::Cameras cameras;
|
||||
cameras.push_back(cam1);
|
||||
cameras.push_back(cam2);
|
||||
|
||||
|
@ -534,14 +746,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_EPI) {
|
|||
params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist);
|
||||
params.setEnableEPI(true);
|
||||
|
||||
SmartFactorRS smartFactor1(model, params);
|
||||
smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS smartFactor2(model, params);
|
||||
smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS smartFactor1(model, cameraRig, params);
|
||||
smartFactor1.add(measurements_lmk1, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS smartFactor3(model, params);
|
||||
smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS smartFactor2(model, cameraRig, params);
|
||||
smartFactor2.add(measurements_lmk2, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS smartFactor3(model, cameraRig, params);
|
||||
smartFactor3.add(measurements_lmk3, key_pairs, interp_factors);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
|
@ -594,18 +809,23 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
SmartProjectionParams params;
|
||||
params.setRankTolerance(1.0);
|
||||
params.setLinearizationMode(gtsam::HESSIAN);
|
||||
params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY);
|
||||
// params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY); // this would give an
|
||||
// exception as expected
|
||||
params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY);
|
||||
params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist);
|
||||
params.setEnableEPI(false);
|
||||
|
||||
SmartFactorRS smartFactor1(model, params);
|
||||
smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS smartFactor2(model, params);
|
||||
smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS smartFactor1(model, cameraRig, params);
|
||||
smartFactor1.add(measurements_lmk1, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS smartFactor3(model, params);
|
||||
smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS smartFactor2(model, cameraRig, params);
|
||||
smartFactor2.add(measurements_lmk2, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS smartFactor3(model, cameraRig, params);
|
||||
smartFactor3.add(measurements_lmk3, key_pairs, interp_factors);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
|
@ -673,17 +893,24 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
params.setDynamicOutlierRejectionThreshold(dynamicOutlierRejectionThreshold);
|
||||
params.setEnableEPI(false);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor2(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor4(new SmartFactorRS(model, params));
|
||||
smartFactor4->add(measurements_lmk4, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor3(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor4(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor4->add(measurements_lmk4, key_pairs, interp_factors);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
|
@ -733,8 +960,12 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
interp_factors.push_back(interp_factor2);
|
||||
interp_factors.push_back(interp_factor3);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
|
||||
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
|
||||
Point3(0.1, 0.1, 0.1)); // smaller noise
|
||||
|
@ -870,9 +1101,12 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
interp_factors.push_back(interp_factor3);
|
||||
interp_factors.push_back(interp_factor1);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
|
||||
smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors,
|
||||
sharedK);
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors);
|
||||
|
||||
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
|
||||
Point3(0.1, 0.1, 0.1)); // smaller noise
|
||||
|
@ -1026,15 +1260,21 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
interp_factors_redundant.push_back(
|
||||
interp_factors.at(0)); // we readd the first interp factor
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor1(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor1->add(measurements_lmk1_redundant, key_pairs_redundant,
|
||||
interp_factors_redundant, sharedK);
|
||||
interp_factors_redundant);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor2(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK);
|
||||
SmartFactorRS::shared_ptr smartFactor3(
|
||||
new SmartFactorRS(model, cameraRig, params));
|
||||
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);
|
||||
|
||||
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
|
||||
|
||||
|
@ -1076,16 +1316,20 @@ TEST(SmartProjectionPoseFactorRollingShutter,
|
|||
|
||||
#ifndef DISABLE_TIMING
|
||||
#include <gtsam/base/timing.h>
|
||||
// -Total: 0 CPU (0 times, 0 wall, 0.04 children, min: 0 max: 0)
|
||||
//| -SF RS LINEARIZE: 0.02 CPU (1000 times, 0.017244 wall, 0.02 children, min:
|
||||
// 0 max: 0) | -RS LINEARIZE: 0.02 CPU (1000 times, 0.009035 wall, 0.02
|
||||
// children, min: 0 max: 0)
|
||||
//-Total: 0 CPU (0 times, 0 wall, 0.21 children, min: 0 max: 0)
|
||||
//| -SF RS LINEARIZE: 0.09 CPU
|
||||
// (10000 times, 0.124106 wall, 0.09 children, min: 0 max: 0)
|
||||
//| -RS LINEARIZE: 0.09 CPU
|
||||
// (10000 times, 0.068719 wall, 0.09 children, min: 0 max: 0)
|
||||
/* *************************************************************************/
|
||||
TEST(SmartProjectionPoseFactorRollingShutter, timing) {
|
||||
using namespace vanillaPose;
|
||||
|
||||
// Default cameras for simple derivatives
|
||||
static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0));
|
||||
SmartProjectionParams params(
|
||||
gtsam::HESSIAN,
|
||||
gtsam::ZERO_ON_DEGENERACY); // only config that works with RS factors
|
||||
|
||||
Rot3 R = Rot3::identity();
|
||||
Pose3 pose1 = Pose3(R, Point3(0, 0, 0));
|
||||
|
@ -1102,16 +1346,18 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
|
|||
measurements_lmk1.push_back(cam1.project(landmark1));
|
||||
measurements_lmk1.push_back(cam2.project(landmark1));
|
||||
|
||||
size_t nrTests = 1000;
|
||||
size_t nrTests = 10000;
|
||||
|
||||
for (size_t i = 0; i < nrTests; i++) {
|
||||
SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(model));
|
||||
boost::shared_ptr<Cameras> cameraRig(new Cameras());
|
||||
cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple));
|
||||
|
||||
SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(
|
||||
model, cameraRig, params));
|
||||
double interp_factor = 0; // equivalent to measurement taken at pose 1
|
||||
smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor,
|
||||
sharedKSimple, body_P_sensorId);
|
||||
smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor);
|
||||
interp_factor = 1; // equivalent to measurement taken at pose 2
|
||||
smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor,
|
||||
sharedKSimple, body_P_sensorId);
|
||||
smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor);
|
||||
|
||||
Values values;
|
||||
values.insert(x1, pose1);
|
||||
|
@ -1122,7 +1368,8 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
|
|||
}
|
||||
|
||||
for (size_t i = 0; i < nrTests; i++) {
|
||||
SmartFactor::shared_ptr smartFactor(new SmartFactor(model, sharedKSimple));
|
||||
SmartFactor::shared_ptr smartFactor(
|
||||
new SmartFactor(model, sharedKSimple, params));
|
||||
smartFactor->add(measurements_lmk1[0], x1);
|
||||
smartFactor->add(measurements_lmk1[1], x2);
|
||||
|
||||
|
|
|
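Taken together, these hunks replace the per-measurement `sharedK`/`body_P_sensor` arguments with a camera rig that is fixed when the factor is constructed. A minimal sketch of the new calling convention, assuming the `SmartFactorRS` typedef, `model`, `params`, keys, and measurements defined earlier in this test file (a usage sketch, not part of the change):

    // build the rig once: one entry per physical camera, each with its
    // body-to-sensor extrinsics and intrinsic calibration
    boost::shared_ptr<Cameras> cameraRig(new Cameras());
    cameraRig->push_back(Camera(body_P_sensor, sharedK));  // rig camera with id 0
    SmartFactorRS::shared_ptr factor(
        new SmartFactorRS(model, cameraRig, params));      // rig fixed at construction
    // per measurement: pixel, the two pose keys it interpolates between,
    // the interpolation factor, and the id of the rig camera that saw it
    factor->add(measurement1, x1, x2, interp_factor1, cameraId1);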
@@ -89,12 +89,19 @@ set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES
    RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name
)

# Set the path for the GTSAM python module
set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)

# Symlink all tests .py files to build folder.
# Copy all python files to build folder.
copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
               "${GTSAM_MODULE_PATH}")

# Hack to get python test files copied every time they are modified
file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py")
foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
    configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY)
endforeach()

# Common directory for data/datasets stored with the package.
# This will store the data in the Python site package directly.
file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")
@@ -147,10 +154,16 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)

    set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)

    # Symlink all tests .py files to build folder.
    # Copy all python files to build folder.
    copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
                   "${GTSAM_UNSTABLE_MODULE_PATH}")

    # Hack to get python test files copied every time they are modified
    file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py")
    foreach(test_file ${GTSAM_UNSTABLE_PYTHON_TEST_FILES})
        configure_file(${test_file} "${GTSAM_UNSTABLE_MODULE_PATH}/tests/${test_file}" COPYONLY)
    endforeach()

    # Add gtsam_unstable to the install target
    list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})
@@ -10,31 +10,51 @@ A script validating and demonstrating the ImuFactor inference.
Author: Frank Dellaert, Varun Agrawal
"""

# pylint: disable=no-name-in-module,unused-import,arguments-differ
# pylint: disable=no-name-in-module,unused-import,arguments-differ,import-error,wrong-import-order

from __future__ import print_function

import argparse
import math

import gtsam
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D

import gtsam
from gtsam.symbol_shorthand import B, V, X
from gtsam.utils.plot import plot_pose3
from mpl_toolkits.mplot3d import Axes3D

from PreintegrationExample import POSES_FIG, PreintegrationExample

BIAS_KEY = B(0)
GRAVITY = 9.81

np.set_printoptions(precision=3, suppress=True)


def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser("ImuFactorExample.py")
    parser.add_argument("--twist_scenario",
                        default="sick_twist",
                        choices=("zero_twist", "forward_twist", "loop_twist",
                                 "sick_twist"))
    parser.add_argument("--time",
                        "-T",
                        default=12,
                        type=int,
                        help="Total navigation time in seconds")
    parser.add_argument("--compute_covariances",
                        default=False,
                        action='store_true')
    parser.add_argument("--verbose", default=False, action='store_true')
    args = parser.parse_args()
    return args


class ImuFactorExample(PreintegrationExample):
    """Class to run example of the Imu Factor."""
    def __init__(self, twist_scenario="sick_twist"):
    def __init__(self, twist_scenario: str = "sick_twist"):
        self.velocity = np.array([2, 0, 0])
        self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
        self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
@@ -51,19 +71,30 @@ class ImuFactorExample(PreintegrationExample):
        gyroBias = np.array([0.1, 0.3, -0.1])
        bias = gtsam.imuBias.ConstantBias(accBias, gyroBias)

        params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)

        # Some arbitrary noise sigmas
        gyro_sigma = 1e-3
        accel_sigma = 1e-3
        I_3x3 = np.eye(3)
        params.setGyroscopeCovariance(gyro_sigma**2 * I_3x3)
        params.setAccelerometerCovariance(accel_sigma**2 * I_3x3)
        params.setIntegrationCovariance(1e-7**2 * I_3x3)

        dt = 1e-2
        super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario],
                                               bias, dt)
                                               bias, params, dt)

    def addPrior(self, i, graph):
        """Add priors at time step `i`."""
    def addPrior(self, i: int, graph: gtsam.NonlinearFactorGraph):
        """Add a prior on the navigation state at time `i`."""
        state = self.scenario.navState(i)
        graph.push_back(
            gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
        graph.push_back(
            gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise))

    def optimize(self, graph, initial):
    def optimize(self, graph: gtsam.NonlinearFactorGraph,
                 initial: gtsam.Values):
        """Optimize using Levenberg-Marquardt optimization."""
        params = gtsam.LevenbergMarquardtParams()
        params.setVerbosityLM("SUMMARY")
@@ -71,24 +102,49 @@ class ImuFactorExample(PreintegrationExample):
        result = optimizer.optimize()
        return result

    def plot(self, result):
        """Plot resulting poses."""
    def plot(self,
             values: gtsam.Values,
             title: str = "Estimated Trajectory",
             fignum: int = POSES_FIG + 1,
             show: bool = False):
        """
        Plot poses in values.

        Args:
            values: The values object with the poses to plot.
            title: The title of the plot.
            fignum: The matplotlib figure number.
                POSES_FIG is a value from the PreintegrationExample which we simply increment to generate a new figure.
            show: Flag indicating whether to display the figure.
        """
        i = 0
        while result.exists(X(i)):
            pose_i = result.atPose3(X(i))
            plot_pose3(POSES_FIG + 1, pose_i, 1)
        while values.exists(X(i)):
            pose_i = values.atPose3(X(i))
            plot_pose3(fignum, pose_i, 1)
            i += 1
        plt.title("Estimated Trajectory")
        plt.title(title)

        gtsam.utils.plot.set_axes_equal(POSES_FIG + 1)
        gtsam.utils.plot.set_axes_equal(fignum)

        print("Bias Values", result.atConstantBias(BIAS_KEY))
        print("Bias Values", values.atConstantBias(BIAS_KEY))

        plt.ioff()
        plt.show()

    def run(self, T=12, compute_covariances=False, verbose=True):
        """Main runner."""
        if show:
            plt.show()

    def run(self,
            T: int = 12,
            compute_covariances: bool = False,
            verbose: bool = True):
        """
        Main runner.

        Args:
            T: Total trajectory time.
            compute_covariances: Flag indicating whether to compute marginal covariances.
            verbose: Flag indicating if printing should be verbose.
        """
        graph = gtsam.NonlinearFactorGraph()

        # initialize data structure for pre-integrated IMU measurements
@@ -173,25 +229,11 @@ class ImuFactorExample(PreintegrationExample):
            print("Covariance on vel {}:\n{}\n".format(
                i, marginals.marginalCovariance(V(i))))

        self.plot(result)
        self.plot(result, show=True)


if __name__ == '__main__':
    parser = argparse.ArgumentParser("ImuFactorExample.py")
    parser.add_argument("--twist_scenario",
                        default="sick_twist",
                        choices=("zero_twist", "forward_twist", "loop_twist",
                                 "sick_twist"))
    parser.add_argument("--time",
                        "-T",
                        default=12,
                        type=int,
                        help="Total time in seconds")
    parser.add_argument("--compute_covariances",
                        default=False,
                        action='store_true')
    parser.add_argument("--verbose", default=False, action='store_true')
    args = parser.parse_args()
    args = parse_args()

    ImuFactorExample(args.twist_scenario).run(args.time,
                                              args.compute_covariances,
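The example now builds its own preintegration parameters instead of relying on the base-class defaults. For reference, a rough C++ equivalent of that parameter setup, using the same sigma values as the Python diff above (a sketch only, not part of the change):

    // gravity along -Z with Z up, as in MakeSharedU(GRAVITY)
    auto params = gtsam::PreintegrationParams::MakeSharedU(9.81);
    const double gyroSigma = 1e-3, accelSigma = 1e-3;  // arbitrary noise sigmas, as above
    params->setGyroscopeCovariance(gyroSigma * gyroSigma * gtsam::I_3x3);
    params->setAccelerometerCovariance(accelSigma * accelSigma * gtsam::I_3x3);
    params->setIntegrationCovariance(1e-7 * 1e-7 * gtsam::I_3x3);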
@@ -0,0 +1,178 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)

See LICENSE for the license information

Pose SLAM example using iSAM2 in the 2D plane.
Author: Jerred Chen, Yusuf Ali
Modeled after:
    - VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
    - Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""

import math

import matplotlib.pyplot as plt
import numpy as np

import gtsam
import gtsam.utils.plot as gtsam_plot

def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
                       key: int):
    """Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""

    # Print the current estimates computed using iSAM2.
    print("*"*50 + f"\nInference after State {key+1}:\n")
    print(current_estimate)

    # Compute the marginals for all states in the graph.
    marginals = gtsam.Marginals(graph, current_estimate)

    # Plot the newly updated iSAM2 inference.
    fig = plt.figure(0)
    axes = fig.gca()
    plt.cla()

    i = 1
    while current_estimate.exists(i):
        gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
        i += 1

    plt.axis('equal')
    axes.set_xlim(-1, 5)
    axes.set_ylim(-1, 3)
    plt.pause(1)

def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
                           key: int, xy_tol=0.6, theta_tol=17) -> int:
    """Simple brute force approach which iterates through previous states
    and checks for loop closure.

    Args:
        odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
        current_estimate: The current estimates computed by iSAM2.
        key: Key corresponding to the current state estimate of the robot.
        xy_tol: Optional argument for the x-y measurement tolerance, in meters.
        theta_tol: Optional argument for the theta measurement tolerance, in degrees.
    Returns:
        k: The key of the state which is helping add the loop closure constraint.
            If loop closure is not found, then None is returned.
    """
    if current_estimate:
        prev_est = current_estimate.atPose2(key+1)
        rotated_odom = prev_est.rotation().matrix() @ odom[:2]
        curr_xy = np.array([prev_est.x() + rotated_odom[0],
                            prev_est.y() + rotated_odom[1]])
        curr_theta = prev_est.theta() + odom[2]
        for k in range(1, key+1):
            pose_xy = np.array([current_estimate.atPose2(k).x(),
                                current_estimate.atPose2(k).y()])
            pose_theta = current_estimate.atPose2(k).theta()
            if (abs(pose_xy - curr_xy) <= xy_tol).all() and \
                (abs(pose_theta - curr_theta) <= theta_tol*np.pi/180):
                    return k

def Pose2SLAM_ISAM2_example():
    """Perform 2D SLAM given the ground truth changes in pose as well as
    simple loop closure detection."""
    plt.ion()

    # Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xy_sigma = 0.3

    # Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
    prior_theta_sigma = 5

    # Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xy_sigma = 0.2

    # Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
    odometry_theta_sigma = 5

    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
                                                             prior_xy_sigma,
                                                             prior_theta_sigma*np.pi/180]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
                                                                odometry_xy_sigma,
                                                                odometry_theta_sigma*np.pi/180]))

    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # Create the ground truth odometry measurements of the robot during the trajectory.
    true_odometry = [(2, 0, 0),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2),
                     (2, 0, math.pi/2)]

    # Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
    odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
                             for true_odom in true_odometry]

    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization.
    graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
    initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))

    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate

    for i in range(len(true_odometry)):

        # Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
        noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]

        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)

        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
                gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))

            # Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
            computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
                                                                                    noisy_odom_y,
                                                                                    noisy_odom_theta))
            initial_estimate.insert(i + 2, computed_estimate)

        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()

        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        initial_estimate.clear()

    # Print the final covariance matrix for each pose after completing inference on the trajectory.
    marginals = gtsam.Marginals(graph, current_estimate)
    i = 1
    for i in range(1, len(true_odometry)+1):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")

    plt.ioff()
    plt.show()


if __name__ == "__main__":
    Pose2SLAM_ISAM2_example()
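The update/clear pattern in this example is the standard iSAM2 incremental loop; for readers coming from the C++ side, a condensed counterpart with the same parameter values (a sketch, details elided):

    gtsam::ISAM2Params parameters;
    parameters.relinearizeThreshold = 0.1;
    parameters.relinearizeSkip = 1;
    gtsam::ISAM2 isam(parameters);
    // per step: stage only the new factors and initial guesses, then update
    isam.update(graph, initialEstimate);
    gtsam::Values currentEstimate = isam.calculateEstimate();
    graph.resize(0);          // staged factors now live in the iSAM2 tree
    initialEstimate.clear();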
@@ -0,0 +1,208 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)

See LICENSE for the license information

Pose SLAM example using iSAM2 in 3D space.
Author: Jerred Chen
Modeled after:
    - VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
    - Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""

from typing import List

import matplotlib.pyplot as plt
import numpy as np

import gtsam
import gtsam.utils.plot as gtsam_plot

def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
                       key: int):
    """Print and plot incremental progress of the robot for 3D Pose SLAM using iSAM2."""

    # Print the current estimates computed using iSAM2.
    print("*"*50 + f"\nInference after State {key+1}:\n")
    print(current_estimate)

    # Compute the marginals for all states in the graph.
    marginals = gtsam.Marginals(graph, current_estimate)

    # Plot the newly updated iSAM2 inference.
    fig = plt.figure(0)
    axes = fig.gca(projection='3d')
    plt.cla()

    i = 1
    while current_estimate.exists(i):
        gtsam_plot.plot_pose3(0, current_estimate.atPose3(i), 10,
                              marginals.marginalCovariance(i))
        i += 1

    axes.set_xlim3d(-30, 45)
    axes.set_ylim3d(-30, 45)
    axes.set_zlim3d(-30, 45)
    plt.pause(1)

def create_poses() -> List[gtsam.Pose3]:
    """Creates ground truth poses of the robot."""
    P0 = np.array([[1, 0, 0, 0],
                   [0, 1, 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    P1 = np.array([[0, -1, 0, 15],
                   [1, 0, 0, 15],
                   [0, 0, 1, 20],
                   [0, 0, 0, 1]])
    P2 = np.array([[np.cos(np.pi/4), 0, np.sin(np.pi/4), 30],
                   [0, 1, 0, 30],
                   [-np.sin(np.pi/4), 0, np.cos(np.pi/4), 30],
                   [0, 0, 0, 1]])
    P3 = np.array([[0, 1, 0, 30],
                   [0, 0, -1, 0],
                   [-1, 0, 0, -15],
                   [0, 0, 0, 1]])
    P4 = np.array([[-1, 0, 0, 0],
                   [0, -1, 0, -10],
                   [0, 0, 1, -10],
                   [0, 0, 0, 1]])
    P5 = P0[:]

    return [gtsam.Pose3(P0), gtsam.Pose3(P1), gtsam.Pose3(P2),
            gtsam.Pose3(P3), gtsam.Pose3(P4), gtsam.Pose3(P5)]

def determine_loop_closure(odom_tf: gtsam.Pose3, current_estimate: gtsam.Values,
                           key: int, xyz_tol=0.6, rot_tol=17) -> int:
    """Simple brute force approach which iterates through previous states
    and checks for loop closure.

    Args:
        odom_tf: The noisy odometry transformation measurement in the body frame.
        current_estimate: The current estimates computed by iSAM2.
        key: Key corresponding to the current state estimate of the robot.
        xyz_tol: Optional argument for the translational tolerance, in meters.
        rot_tol: Optional argument for the rotational tolerance, in degrees.
    Returns:
        k: The key of the state which is helping add the loop closure constraint.
            If loop closure is not found, then None is returned.
    """
    if current_estimate:
        prev_est = current_estimate.atPose3(key+1)
        curr_est = prev_est.compose(odom_tf)
        for k in range(1, key+1):
            pose = current_estimate.atPose3(k)
            if (abs(pose.matrix()[:3,:3] - curr_est.matrix()[:3,:3]) <= rot_tol*np.pi/180).all() and \
                (abs(pose.matrix()[:3,3] - curr_est.matrix()[:3,3]) <= xyz_tol).all():
                    return k

def Pose3_ISAM2_example():
    """Perform 3D SLAM given ground truth poses as well as simple
    loop closure detection."""
    plt.ion()

    # Declare the 3D translational standard deviations of the prior factor's Gaussian model, in meters.
    prior_xyz_sigma = 0.3

    # Declare the 3D rotational standard deviations of the prior factor's Gaussian model, in degrees.
    prior_rpy_sigma = 5

    # Declare the 3D translational standard deviations of the odometry factor's Gaussian model, in meters.
    odometry_xyz_sigma = 0.2

    # Declare the 3D rotational standard deviations of the odometry factor's Gaussian model, in degrees.
    odometry_rpy_sigma = 5

    # Although this example only uses linear measurements and Gaussian noise models, it is important
    # to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
    # simply showcases how iSAM2 may be applied to a Pose3 SLAM problem.
    PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_rpy_sigma*np.pi/180,
                                                             prior_rpy_sigma*np.pi/180,
                                                             prior_rpy_sigma*np.pi/180,
                                                             prior_xyz_sigma,
                                                             prior_xyz_sigma,
                                                             prior_xyz_sigma]))
    ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_rpy_sigma*np.pi/180,
                                                                odometry_rpy_sigma*np.pi/180,
                                                                odometry_rpy_sigma*np.pi/180,
                                                                odometry_xyz_sigma,
                                                                odometry_xyz_sigma,
                                                                odometry_xyz_sigma]))

    # Create a Nonlinear factor graph as well as the data structure to hold state estimates.
    graph = gtsam.NonlinearFactorGraph()
    initial_estimate = gtsam.Values()

    # Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
    # update calls are required to perform the relinearization.
    parameters = gtsam.ISAM2Params()
    parameters.setRelinearizeThreshold(0.1)
    parameters.setRelinearizeSkip(1)
    isam = gtsam.ISAM2(parameters)

    # Create the ground truth poses of the robot trajectory.
    true_poses = create_poses()

    # Create the ground truth odometry transformations, xyz translations, and roll-pitch-yaw rotations
    # between each robot pose in the trajectory.
    odometry_tf = [true_poses[i-1].transformPoseTo(true_poses[i]) for i in range(1, len(true_poses))]
    odometry_xyz = [(odometry_tf[i].x(), odometry_tf[i].y(), odometry_tf[i].z()) for i in range(len(odometry_tf))]
    odometry_rpy = [odometry_tf[i].rotation().rpy() for i in range(len(odometry_tf))]

    # Corrupt xyz translations and roll-pitch-yaw rotations with gaussian noise to create noisy odometry measurements.
    noisy_measurements = [np.random.multivariate_normal(np.hstack((odometry_rpy[i],odometry_xyz[i])), \
                                                        ODOMETRY_NOISE.covariance()) for i in range(len(odometry_tf))]

    # Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
    # iSAM2 incremental optimization.
    graph.push_back(gtsam.PriorFactorPose3(1, true_poses[0], PRIOR_NOISE))
    initial_estimate.insert(1, true_poses[0].compose(gtsam.Pose3(
        gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))

    # Initialize the current estimate which is used during the incremental inference loop.
    current_estimate = initial_estimate
    for i in range(len(odometry_tf)):

        # Obtain the noisy translation and rotation that is received by the robot and corrupted by gaussian noise.
        noisy_odometry = noisy_measurements[i]

        # Compute the noisy odometry transformation according to the xyz translation and roll-pitch-yaw rotation.
        noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy_odometry[:3]), noisy_odometry[3:6].reshape(-1,1))

        # Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
        loop = determine_loop_closure(noisy_tf, current_estimate, i, xyz_tol=18, rot_tol=30)

        # Add a binary factor in between two existing states if loop closure is detected.
        # Otherwise, add a binary factor between a newly observed state and the previous state.
        if loop:
            graph.push_back(gtsam.BetweenFactorPose3(i + 1, loop, noisy_tf, ODOMETRY_NOISE))
        else:
            graph.push_back(gtsam.BetweenFactorPose3(i + 1, i + 2, noisy_tf, ODOMETRY_NOISE))

            # Compute and insert the initialization estimate for the current pose using a noisy odometry measurement.
            noisy_estimate = current_estimate.atPose3(i + 1).compose(noisy_tf)
            initial_estimate.insert(i + 2, noisy_estimate)

        # Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
        isam.update(graph, initial_estimate)
        current_estimate = isam.calculateEstimate()

        # Report all current state estimates from the iSAM2 optimization.
        report_on_progress(graph, current_estimate, i)
        initial_estimate.clear()

    # Print the final covariance matrix for each pose after completing inference.
    marginals = gtsam.Marginals(graph, current_estimate)
    i = 1
    while current_estimate.exists(i):
        print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
        i += 1

    plt.ioff()
    plt.show()

if __name__ == '__main__':
    Pose3_ISAM2_example()
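The loop-closure test above compares the rotation and translation entries of the 4x4 pose matrices element-wise. The same check can be phrased on the relative pose instead; a hypothetical C++ helper to illustrate the idea (not part of the example):

    // returns true if two poses agree within the given tolerances
    bool nearby(const gtsam::Pose3& a, const gtsam::Pose3& b,
                double xyzTol, double rotTolDeg) {
      const gtsam::Pose3 delta = a.between(b);
      return delta.translation().norm() <= xyzTol &&
             gtsam::Rot3::Logmap(delta.rotation()).norm() <= rotTolDeg * M_PI / 180;
    }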
@ -5,10 +5,14 @@ All Rights Reserved
|
|||
|
||||
See LICENSE for the license information
|
||||
|
||||
A script validating the Preintegration of IMU measurements
|
||||
A script validating the Preintegration of IMU measurements.
|
||||
|
||||
Authors: Frank Dellaert, Varun Agrawal.
|
||||
"""
|
||||
|
||||
import math
|
||||
# pylint: disable=invalid-name,unused-import,wrong-import-order
|
||||
|
||||
from typing import Optional, Sequence
|
||||
|
||||
import gtsam
|
||||
import matplotlib.pyplot as plt
|
||||
|
@@ -18,25 +22,28 @@ from mpl_toolkits.mplot3d import Axes3D

IMU_FIG = 1
POSES_FIG = 2
GRAVITY = 10


class PreintegrationExample(object):
class PreintegrationExample:
    """Base class for all preintegration examples."""

    @staticmethod
    def defaultParams(g):
    def defaultParams(g: float):
        """Create default parameters with Z *up* and realistic noise parameters"""
        params = gtsam.PreintegrationParams.MakeSharedU(g)
        kGyroSigma = math.radians(0.5) / 60  # 0.5 degree ARW
        kGyroSigma = np.radians(0.5) / 60  # 0.5 degree ARW
        kAccelSigma = 0.1 / 60  # 10 cm VRW
        params.setGyroscopeCovariance(
            kGyroSigma ** 2 * np.identity(3, float))
        params.setAccelerometerCovariance(
            kAccelSigma ** 2 * np.identity(3, float))
        params.setIntegrationCovariance(
            0.0000001 ** 2 * np.identity(3, float))
        params.setGyroscopeCovariance(kGyroSigma**2 * np.identity(3, float))
        params.setAccelerometerCovariance(kAccelSigma**2 *
                                          np.identity(3, float))
        params.setIntegrationCovariance(0.0000001**2 * np.identity(3, float))
        return params

    def __init__(self, twist=None, bias=None, dt=1e-2):
    def __init__(self,
                 twist: Optional[np.ndarray] = None,
                 bias: Optional[gtsam.imuBias.ConstantBias] = None,
                 params: Optional[gtsam.PreintegrationParams] = None,
                 dt: float = 1e-2):
        """Initialize with given twist, a pair(angularVelocityVector, velocityVector)."""

        # setup interactive plotting
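As an aside, the noise constants in defaultParams hide a unit conversion that is easy to miss: dividing by 60 is dividing by sqrt(3600), which converts a per-sqrt(hour) random-walk spec into the per-sqrt(second), i.e. per sqrt(Hz), units the covariances need. A hedged sketch of that bookkeeping, assuming the "0.5 degree ARW" and "10 cm VRW" comments are per-sqrt(hour) specs:

import numpy as np

# Assumed spec: 0.5 deg/sqrt(hr) angle random walk (ARW).
# rad/sqrt(s) = rad/sqrt(hr) / sqrt(3600) = rad/sqrt(hr) / 60
kGyroSigma = np.radians(0.5) / 60   # ~1.45e-4 rad/s/sqrt(Hz)

# Assumed spec: 0.1 m/s/sqrt(hr) velocity random walk (VRW).
kAccelSigma = 0.1 / 60              # ~1.67e-3 m/s^2/sqrt(Hz)

print(kGyroSigma**2, kAccelSigma**2)  # the diagonal covariance entries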
@@ -48,7 +55,7 @@ class PreintegrationExample(object):
        else:
            # default = loop with forward velocity 2m/s, while pitching up
            # with angular velocity 30 degree/sec (negative in FLU)
            W = np.array([0, -math.radians(30), 0])
            W = np.array([0, -np.radians(30), 0])
            V = np.array([2, 0, 0])

        self.scenario = gtsam.ConstantTwistScenario(W, V)
@@ -58,9 +65,11 @@ class PreintegrationExample(object):
        self.labels = list('xyz')
        self.colors = list('rgb')

        # Create runner
        self.g = 10  # simple gravity constant
        self.params = self.defaultParams(self.g)
        if params:
            self.params = params
        else:
            # Default params with simple gravity constant
            self.params = self.defaultParams(g=GRAVITY)

        if bias is not None:
            self.actualBias = bias
@@ -69,13 +78,22 @@ class PreintegrationExample(object):
            gyroBias = np.array([0, 0, 0])
            self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias)

        self.runner = gtsam.ScenarioRunner(
            self.scenario, self.params, self.dt, self.actualBias)
        # Create runner
        self.runner = gtsam.ScenarioRunner(self.scenario, self.params, self.dt,
                                           self.actualBias)

        fig, self.axes = plt.subplots(4, 3)
        fig.set_tight_layout(True)

    def plotImu(self, t, measuredOmega, measuredAcc):
    def plotImu(self, t: float, measuredOmega: Sequence,
                measuredAcc: Sequence):
        """
        Plot IMU measurements.
        Args:
            t: The time at which the measurement was recorded.
            measuredOmega: Measured angular velocity.
            measuredAcc: Measured linear acceleration.
        """
        plt.figure(IMU_FIG)

        # plot angular velocity
@@ -108,12 +126,21 @@ class PreintegrationExample(object):
            ax.scatter(t, measuredAcc[i], color=color, marker='.')
            ax.set_xlabel('specific force ' + label)

    def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01):
        # plot ground truth pose, as well as prediction from integrated IMU measurements
    def plotGroundTruthPose(self,
                            t: float,
                            scale: float = 0.3,
                            time_interval: float = 0.01):
        """
        Plot ground truth pose, as well as prediction from integrated IMU measurements.
        Args:
            t: Time at which the pose was obtained.
            scale: The scaling factor for the pose axes.
            time_interval: The time to wait before showing the plot.
        """
        actualPose = self.scenario.pose(t)
        plot_pose3(POSES_FIG, actualPose, scale)
        t = actualPose.translation()
        self.maxDim = max([max(np.abs(t)), self.maxDim])
        translation = actualPose.translation()
        self.maxDim = max([max(np.abs(translation)), self.maxDim])
        ax = plt.gca()
        ax.set_xlim3d(-self.maxDim, self.maxDim)
        ax.set_ylim3d(-self.maxDim, self.maxDim)
@@ -121,8 +148,8 @@ class PreintegrationExample(object):

        plt.pause(time_interval)

    def run(self, T=12):
        # simulate the loop
    def run(self, T: int = 12):
        """Simulate the loop."""
        for i, t in enumerate(np.arange(0, T, self.dt)):
            measuredOmega = self.runner.measuredAngularVelocity(t)
            measuredAcc = self.runner.measuredSpecificForce(t)
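For reference, a minimal driver for the refactored class; the twist value is illustrative and the import path is assumed, since the module name is not visible in this diff:

import numpy as np

# Assumed import; the example module's name is not shown in the diff:
# from PreintegrationExample import PreintegrationExample

# A gentle constant twist: 2 m/s forward while yawing at ~0.1 rad/s.
twist = np.array([0, 0, 0.1, 2, 0, 0])
params = PreintegrationExample.defaultParams(g=10)
example = PreintegrationExample(twist=twist, bias=None, params=params, dt=1e-2)
example.run(T=12)  # simulate 12 seconds, plotting IMU data and poses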
@@ -7,69 +7,75 @@

See LICENSE for the license information

Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert)
Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert, Varun Agrawal)
"""

import argparse
import logging
import sys

import matplotlib.pyplot as plt
import numpy as np

import gtsam
from gtsam import (
    GeneralSFMFactorCal3Bundler,
    PinholeCameraCal3Bundler,
    PriorFactorPinholeCameraCal3Bundler,
    readBal,
    symbol_shorthand
)
from gtsam import (GeneralSFMFactorCal3Bundler,
                   PriorFactorPinholeCameraCal3Bundler, PriorFactorPoint3)
from gtsam.symbol_shorthand import C, P  # type: ignore
from gtsam.utils import plot  # type: ignore
from matplotlib import pyplot as plt

C = symbol_shorthand.C
P = symbol_shorthand.P
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

DEFAULT_BAL_DATASET = "dubrovnik-3-7-pre"


logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def plot_scene(scene_data: gtsam.SfmData, result: gtsam.Values) -> None:
    """Plot the SFM results."""
    plot_vals = gtsam.Values()
    for cam_idx in range(scene_data.number_cameras()):
        plot_vals.insert(C(cam_idx),
                         result.atPinholeCameraCal3Bundler(C(cam_idx)).pose())
    for j in range(scene_data.number_tracks()):
        plot_vals.insert(P(j), result.atPoint3(P(j)))

def run(args):
    plot.plot_3d_points(0, plot_vals, linespec="g.")
    plot.plot_trajectory(0, plot_vals, title="SFM results")

    plt.show()


def run(args: argparse.Namespace) -> None:
    """ Run LM optimization with BAL input data and report resulting error """
    input_file = gtsam.findExampleDataFile(args.input_file)
    input_file = args.input_file

    # Load the SfM data from file
    scene_data = readBal(input_file)
    logging.info(f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n")
    scene_data = gtsam.readBal(input_file)
    logging.info("read %d tracks on %d cameras\n", scene_data.number_tracks(),
                 scene_data.number_cameras())

    # Create a factor graph
    graph = gtsam.NonlinearFactorGraph()

    # We share *one* noiseModel between all projection factors
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0) # one pixel in u and v
    noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

    # Add measurements to the factor graph
    j = 0
    for t_idx in range(scene_data.number_tracks()):
        track = scene_data.track(t_idx)  # SfmTrack
    for j in range(scene_data.number_tracks()):
        track = scene_data.track(j)  # SfmTrack
        # retrieve the SfmMeasurement objects
        for m_idx in range(track.number_measurements()):
            # i represents the camera index, and uv is the 2d measurement
            i, uv = track.measurement(m_idx)
            # note use of shorthand symbols C and P
            graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j)))
        j += 1

    # Add a prior on pose x1. This indirectly specifies where the origin is.
    graph.push_back(
        gtsam.PriorFactorPinholeCameraCal3Bundler(
            C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)
        )
    )
        PriorFactorPinholeCameraCal3Bundler(
            C(0), scene_data.camera(0),
            gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
    # Also add a prior on the position of the first landmark to fix the scale
    graph.push_back(
        gtsam.PriorFactorPoint3(
            P(0), scene_data.track(0).point3(), gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
        )
    )
        PriorFactorPoint3(P(0),
                          scene_data.track(0).point3(),
                          gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

    # Create initial estimate
    initial = gtsam.Values()
@@ -81,12 +87,10 @@ def run(args):
        initial.insert(C(i), camera)
        i += 1

    j = 0
    # add each SfmTrack
    for t_idx in range(scene_data.number_tracks()):
        track = scene_data.track(t_idx)
    for j in range(scene_data.number_tracks()):
        track = scene_data.track(j)
        initial.insert(P(j), track.point3())
        j += 1

    # Optimize the graph and print results
    try:
@@ -94,25 +98,31 @@ def run(args):
        params.setVerbosityLM("ERROR")
        lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
        result = lm.optimize()
    except Exception as e:
    except RuntimeError:
        logging.exception("LM Optimization failed")
        return

    # Error drops from ~2764.22 to ~0.046
    logging.info(f"final error: {graph.error(result)}")
    logging.info("initial error: %f", graph.error(initial))
    logging.info("final error: %f", graph.error(result))

    plot_scene(scene_data, result)


def main() -> None:
    """Main runner."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i',
                        '--input_file',
                        type=str,
                        default=gtsam.findExampleDataFile(DEFAULT_BAL_DATASET),
                        help="""Read SFM data from the specified BAL file.
        The data format is described here: https://grail.cs.washington.edu/projects/bal/.
        BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples,
        then (wx,wy,wz,tx,ty,tz,f,k1,k2) as Bundler camera calibrations w/ Rodrigues vector
        and (x,y,z) 3d point initializations.""")
    run(parser.parse_args())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-i',
        '--input_file',
        type=str,
        default="dubrovnik-3-7-pre",
        help='Read SFM data from the specified BAL file'
        'The data format is described here: https://grail.cs.washington.edu/projects/bal/.'
        'BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, '
        'then (wx,wy,wz,tx,ty,tz,f,k1,k2) as Bundler camera calibrations w/ Rodrigues vector'
        'and (x,y,z) 3d point initializations.'
    )
    run(parser.parse_args())

    main()
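Equivalently to invoking the script through its new main(), one can drive run() directly. A hedged sketch, assuming run is in scope and using the same default dataset as above:

import argparse
import gtsam

# Mirrors main() with the default BAL dataset bundled with GTSAM.
args = argparse.Namespace(
    input_file=gtsam.findExampleDataFile("dubrovnik-3-7-pre"))
run(args)  # reads the BAL file, optimizes with LM, logs errors, plots the scene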
@@ -59,8 +59,16 @@ class TestPose3(GtsamTestCase):
        self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2))

    def test_adjoint(self):
        """Test adjoint method."""
        """Test adjoint methods."""
        T = Pose3()
        xi = np.array([1, 2, 3, 4, 5, 6])
        # test calling functions
        T.AdjointMap()
        T.Adjoint(xi)
        T.AdjointTranspose(xi)
        Pose3.adjointMap(xi)
        Pose3.adjoint(xi, xi)
        # test correctness of adjoint(x, y)
        expected = np.dot(Pose3.adjointMap_(xi), xi)
        actual = Pose3.adjoint_(xi, xi)
        np.testing.assert_array_equal(actual, expected)
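The correctness check in test_adjoint encodes the identity adjoint(xi, y) = adjointMap(xi) @ y, that is, the matrix of ad_xi applied to y equals the vee of the Lie bracket of the hatted twists. A numpy-only sketch of the same identity for se(3), assuming GTSAM's [omega; v] twist ordering:

import numpy as np

def skew(w: np.ndarray) -> np.ndarray:
    """3x3 skew-symmetric matrix such that skew(w) @ u == np.cross(w, u)."""
    return np.array([[0, -w[2], w[1]],
                     [w[2], 0, -w[0]],
                     [-w[1], w[0], 0]])

def adjoint_map(xi: np.ndarray) -> np.ndarray:
    """ad(xi) for se(3) with xi = [omega; v], as in Pose3.adjointMap."""
    w, v = xi[:3], xi[3:]
    ad = np.zeros((6, 6))
    ad[:3, :3] = skew(w)
    ad[3:, :3] = skew(v)
    ad[3:, 3:] = skew(w)
    return ad

def hat(xi: np.ndarray) -> np.ndarray:
    """4x4 se(3) matrix of the twist xi = [omega; v]."""
    H = np.zeros((4, 4))
    H[:3, :3] = skew(xi[:3])
    H[:3, 3] = xi[3:]
    return H

def vee(H: np.ndarray) -> np.ndarray:
    """Inverse of hat: recover [omega; v] from a 4x4 se(3) matrix."""
    return np.array([H[2, 1], H[0, 2], H[1, 0], H[0, 3], H[1, 3], H[2, 3]])

xi = np.array([1., 2., 3., 4., 5., 6.])
y = np.array([6., 5., 4., 3., 2., 1.])
bracket = hat(xi) @ hat(y) - hat(y) @ hat(xi)  # Lie bracket [xi^, y^]
assert np.allclose(adjoint_map(xi) @ y, vee(bracket))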
@@ -606,7 +606,7 @@ Vector3 f(const Point2& a, const Vector3& b, OptionalJacobian<3, 2> H1,
  if (H1) *H1 << b.y(), b.z(), b.x(), 0, 0, 0;
  if (H2) *H2 = A;
  return A * b;
};
}
}

TEST(ExpressionFactor, MultiplyWithInverseFunction) {
@@ -114,94 +114,94 @@ using symbol_shorthand::L;

/* Create GUIDs for Noisemodels */
/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")

BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey");
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey")

BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")

/* Create GUIDs for geometry */
/* ************************************************************************* */
GTSAM_VALUE_EXPORT(gtsam::Point2);
GTSAM_VALUE_EXPORT(gtsam::StereoPoint2);
GTSAM_VALUE_EXPORT(gtsam::Point3);
GTSAM_VALUE_EXPORT(gtsam::Rot2);
GTSAM_VALUE_EXPORT(gtsam::Rot3);
GTSAM_VALUE_EXPORT(gtsam::Pose2);
GTSAM_VALUE_EXPORT(gtsam::Pose3);
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2);
GTSAM_VALUE_EXPORT(gtsam::Cal3DS2);
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo);
GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera);
GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2);
GTSAM_VALUE_EXPORT(gtsam::StereoCamera);
GTSAM_VALUE_EXPORT(gtsam::Point2)
GTSAM_VALUE_EXPORT(gtsam::StereoPoint2)
GTSAM_VALUE_EXPORT(gtsam::Point3)
GTSAM_VALUE_EXPORT(gtsam::Rot2)
GTSAM_VALUE_EXPORT(gtsam::Rot3)
GTSAM_VALUE_EXPORT(gtsam::Pose2)
GTSAM_VALUE_EXPORT(gtsam::Pose3)
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2)
GTSAM_VALUE_EXPORT(gtsam::Cal3DS2)
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo)
GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera)
GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2)
GTSAM_VALUE_EXPORT(gtsam::StereoCamera)

/* Create GUIDs for factors */
/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")

BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2");
BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2");
BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3");
BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2");
BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3");
BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2");
BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3");
BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2");
BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2");
BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera");
BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera");
BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2")
BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2")
BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3")
BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2")
BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3")
BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2")
BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3")
BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2")
BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2")
BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera")
BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera")

BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2");
BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3");
BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2");
BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3");
BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2");
BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3");
BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2")
BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3")
BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2")
BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3")
BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2")
BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3")

BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera");
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera")
BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera")

BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D");
BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2");
BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3");
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint");
BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point");
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera");
BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2");
BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D")
BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D")
BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2")
BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3")
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint")
BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point")
BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera")
BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2")

BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D");
BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D")

BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2");
BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2");
BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2")
BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2")

BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2");
BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2");
BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2")
BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2")

BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2");
BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2")

BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D");
BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D")


/* ************************************************************************* */
@@ -200,7 +200,7 @@ TEST(SubgraphPreconditioner, system) {
}

/* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor");
BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor")

// Read from XML file
static GaussianFactorGraph read(const string& name) {