Merge branch 'borglab:develop' into Fix-Cal3Fisheye-Jacobian

release/4.3a0
roderick-koehle 2021-11-22 21:05:15 +01:00 committed by GitHub
commit 8846324b34
92 changed files with 5279 additions and 2236 deletions


@@ -7,9 +7,9 @@ if (GTSAM_WITH_TBB)
if(TBB_FOUND)
set(GTSAM_USE_TBB 1) # This will go into config.h
-if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
-message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
-endif()
+# if ((${TBB_VERSION} VERSION_GREATER "2021.1") OR (${TBB_VERSION} VERSION_EQUAL "2021.1"))
+# message(FATAL_ERROR "TBB version greater than 2021.1 (oneTBB API) is not yet supported. Use an older version instead.")
+# endif()
if ((${TBB_VERSION_MAJOR} GREATER 2020) OR (${TBB_VERSION_MAJOR} EQUAL 2020))
set(TBB_GREATER_EQUAL_2020 1)
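For reference, the GTSAM_USE_TBB flag configured above ends up in GTSAM's generated config header and is consumed from C++ with an ordinary preprocessor guard. A minimal sketch, assuming the usual gtsam/config.h install layout (illustrative only, not part of this PR):

#include <gtsam/config.h>    // generated by CMake; defines GTSAM_USE_TBB when enabled
#ifdef GTSAM_USE_TBB
#include <tbb/task_group.h>  // oneTBB-compatible task API used elsewhere in this PR
#endif

int main() {
#ifdef GTSAM_USE_TBB
  tbb::task_group tg;
  tg.run([] { /* work scheduled on the TBB thread pool */ });
  tg.wait();
#endif
  return 0;
}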


@@ -5082,6 +5082,394 @@ reference "ex:projection"
\end_inset
\end_layout
\begin_layout Subsection
Derivative of Adjoint
\begin_inset CommandInset label
LatexCommand label
name "subsec:pose3_adjoint_deriv"
\end_inset
\end_layout
\begin_layout Standard
Consider
\begin_inset Formula $f:SE(3)\times\mathbb{R}^{6}\rightarrow\mathbb{R}^{6}$
\end_inset
is defined as
\begin_inset Formula $f(T,\xi_{b})=Ad_{T}\hat{\xi}_{b}$
\end_inset
.
The derivative is notated (see Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"
\end_inset
):
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
Df_{(T,\xi_{b})}(\xi,\delta\xi_{b})=D_{1}f_{(T,\xi_{b})}(\xi)+D_{2}f_{(T,\xi_{b})}(\delta\xi_{b})
\]
\end_inset
First, computing
\begin_inset Formula $D_{2}f_{(T,\xi_{b})}(\xi_{b})$
\end_inset
is easy, as its matrix is simply
\begin_inset Formula $Ad_{T}$
\end_inset
:
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
f(T,\xi_{b}+\delta\xi_{b})=Ad_{T}(\widehat{\xi_{b}+\delta\xi_{b}})=Ad_{T}(\hat{\xi}_{b})+Ad_{T}(\delta\hat{\xi}_{b})
\]
\end_inset
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
D_{2}f_{(T,\xi_{b})}(\xi_{b})=Ad_{T}
\]
\end_inset
We will derive
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi)$
\end_inset
using two approaches.
In the first, we'll define
\begin_inset Formula $g(T,\xi)\triangleq T\exp\hat{\xi}$
\end_inset
.
From Section
\begin_inset CommandInset ref
LatexCommand ref
reference "sec:Derivatives-of-Actions"
plural "false"
caps "false"
noprefix "false"
\end_inset
,
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{2}g_{(T,\xi)}(\xi) & =T\hat{\xi}\\
D_{2}g_{(T,\xi)}^{-1}(\xi) & =-\hat{\xi}T^{-1}
\end{align*}
\end_inset
Now we can use the definition of the Adjoint representation
\begin_inset Formula $Ad_{g}\hat{\xi}=g\hat{\xi}g^{-1}$
\end_inset
(aka conjugation by
\begin_inset Formula $g$
\end_inset
) then apply product rule and simplify:
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
D_{1}f_{(T,\xi_{b})}(\xi)=D_{1}\left(Ad_{T\exp(\hat{\xi})}\hat{\xi}_{b}\right)(\xi) & =D_{1}\left(g\hat{\xi}_{b}g^{-1}\right)(\xi)\\
& =\left(D_{2}g_{(T,\xi)}(\xi)\right)\hat{\xi}_{b}g^{-1}(T,0)+g(T,0)\hat{\xi}_{b}\left(D_{2}g_{(T,\xi)}^{-1}(\xi)\right)\\
& =T\hat{\xi}\hat{\xi}_{b}T^{-1}-T\hat{\xi}_{b}\hat{\xi}T^{-1}\\
& =T\left(\hat{\xi}\hat{\xi}_{b}-\hat{\xi}_{b}\hat{\xi}\right)T^{-1}\\
& =Ad_{T}(ad_{\hat{\xi}}\hat{\xi}_{b})\\
& =-Ad_{T}(ad_{\hat{\xi}_{b}}\hat{\xi})\\
D_{1}f_{(T,\xi_{b})} & =-(Ad_{T})(ad_{\hat{\xi}_{b}})
\end{align*}
\end_inset
Where
\begin_inset Formula $ad_{\hat{\xi}}:\mathfrak{g}\rightarrow\mathfrak{g}$
\end_inset
is the adjoint map of the Lie algebra.
\end_layout
\begin_layout Standard
The second, perhaps more intuitive way of deriving
\begin_inset Formula $D_{1}f_{(T,\xi_{b})}(\xi_{b})$
\end_inset
, would be to use the fact that the derivative at the origin
\begin_inset Formula $D_{1}Ad_{I}\hat{\xi}_{b}=ad_{\hat{\xi}_{b}}$
\end_inset
by definition of the adjoint
\begin_inset Formula $ad_{\xi}$
\end_inset
.
Then applying the property
\begin_inset Formula $Ad_{AB}=Ad_{A}Ad_{B}$
\end_inset
,
\end_layout
\begin_layout Standard
\begin_inset Formula
\[
D_{1}Ad_{T}\hat{\xi}_{b}(\xi)=D_{1}Ad_{T*I}\hat{\xi}_{b}(\xi)=Ad_{T}\left(D_{1}Ad_{I}\hat{\xi}_{b}(\xi)\right)=Ad_{T}\left(ad_{\hat{\xi}}(\hat{\xi}_{b})\right)=-Ad_{T}\left(ad_{\hat{\xi}_{b}}(\hat{\xi})\right)
\]
\end_inset
\end_layout
\begin_layout Subsection
Derivative of AdjointTranspose
\end_layout
\begin_layout Standard
The transpose of the Adjoint,
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $Ad_{T}^{T}:\mathfrak{g}^{*}\rightarrow\mathfrak{g}^{*}$
\end_inset
, is useful as a way to change the reference frame of vectors in the dual
space
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
(note the
\begin_inset Formula $^{*}$
\end_inset
denoting that we are now in the dual space)
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
.
To be more concrete, where
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
as
\begin_inset Formula $Ad_{T}\hat{\xi}_{b}$
\end_inset
converts the
\emph on
twist
\emph default
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $\xi_{b}$
\end_inset
from the
\begin_inset Formula $T$
\end_inset
frame,
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset
converts the
\family default
\series default
\shape default
\size default
\emph on
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
wrench
\emph default
\family roman
\series medium
\shape up
\size normal
\emph off
\bar no
\strikeout off
\xout off
\uuline off
\uwave off
\noun off
\color none
\begin_inset Formula $\xi_{b}^{*}$
\end_inset
from the
\begin_inset Formula $T$
\end_inset
frame
\family default
\series default
\shape default
\size default
\emph default
\bar default
\strikeout default
\xout default
\uuline default
\uwave default
\noun default
\color inherit
.
It is difficult to apply a derivation similar to the one in Section
\begin_inset CommandInset ref
LatexCommand ref
reference "subsec:pose3_adjoint_deriv"
plural "false"
caps "false"
noprefix "false"
\end_inset
for the derivative of
\begin_inset Formula $Ad_{T}^{T}\hat{\xi}_{b}^{*}$
\end_inset
because
\begin_inset Formula $Ad_{T}^{T}$
\end_inset
cannot be naturally defined as a conjugation, so we resort to crunching
through the algebra.
The details are omitted but the result is a form that vaguely resembles
(but does not exactly match)
\begin_inset Formula $ad(Ad_{T}^{T}\hat{\xi}_{b}^{*})$
\end_inset
:
\end_layout
\begin_layout Standard
\begin_inset Formula
\begin{align*}
\begin{bmatrix}\omega_{T}\\
v_{T}
\end{bmatrix}^{*} & \triangleq Ad_{T}^{T}\hat{\xi}_{b}^{*}\\
D_{1}Ad_{T}^{T}\hat{\xi}_{b}^{*}(\xi) & =\begin{bmatrix}\hat{\omega}_{T} & \hat{v}_{T}\\
\hat{v}_{T} & 0
\end{bmatrix}
\end{align*}
\end_inset
\end_layout
\begin_layout Subsection
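The derivative formulas documented above are easy to sanity-check numerically. A minimal standalone sketch using GTSAM's numerical differentiation (constants arbitrary; this mirrors the new unit tests rather than adding anything to the PR):

#include <gtsam/geometry/Pose3.h>
#include <gtsam/base/numericalDerivative.h>
#include <cassert>
#include <functional>

int main() {
  using namespace gtsam;
  const Pose3 T(Rot3::RzRyRx(0.1, -0.2, 0.3), Point3(1, 2, 3));
  const Vector6 xi_b = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();

  // f(T, xi_b) = Ad_T * xi_b, differentiated numerically in its first argument.
  std::function<Vector6(const Pose3&, const Vector6&)> f =
      [](const Pose3& T, const Vector6& xi) -> Vector6 { return T.AdjointMap() * xi; };

  const Matrix6 numerical = numericalDerivative21(f, T, xi_b);
  const Matrix6 analytic = -T.AdjointMap() * Pose3::adjointMap(xi_b);  // -Ad_T * ad(xi_b)
  assert(numerical.isApprox(analytic, 1e-5));
  return 0;
}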

Binary file not shown.


@@ -370,4 +370,4 @@ public:
 * the gtsam namespace to be more easily enforced as testable
 */
#define GTSAM_CONCEPT_LIE_INST(T) template class gtsam::IsLieGroup<T>;
-#define GTSAM_CONCEPT_LIE_TYPE(T) typedef gtsam::IsLieGroup<T> _gtsam_IsLieGroup_##T;
+#define GTSAM_CONCEPT_LIE_TYPE(T) using _gtsam_IsLieGroup_##T = gtsam::IsLieGroup<T>;
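Both spellings of the macro produce the same unused alias, whose only purpose is to instantiate the concept check. A hypothetical expansion for a Pose3 argument, just to make the typedef-to-using change concrete (illustrative only):

#include <gtsam/base/Lie.h>
#include <gtsam/geometry/Pose3.h>

// Roughly what GTSAM_CONCEPT_LIE_TYPE(Pose3) expands to after this change;
// the old form was: typedef gtsam::IsLieGroup<Pose3> _gtsam_IsLieGroup_Pose3;
using _gtsam_IsLieGroup_Pose3 = gtsam::IsLieGroup<gtsam::Pose3>;

int main() { return 0; }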


@@ -178,4 +178,4 @@ struct FixedDimension {
// * the gtsam namespace to be more easily enforced as testable
// */
#define GTSAM_CONCEPT_MANIFOLD_INST(T) template class gtsam::IsManifold<T>;
-#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) typedef gtsam::IsManifold<T> _gtsam_IsManifold_##T;
+#define GTSAM_CONCEPT_MANIFOLD_TYPE(T) using _gtsam_IsManifold_##T = gtsam::IsManifold<T>;


@@ -46,28 +46,28 @@ typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> M
// Create handy typedefs and constants for square-size matrices
// MatrixMN, MatrixN = MatrixNN, I_NxN, and Z_NxN, for M,N=1..9
#define GTSAM_MAKE_MATRIX_DEFS(N) \
-typedef Eigen::Matrix<double, N, N> Matrix##N; \
-typedef Eigen::Matrix<double, 1, N> Matrix1##N; \
-typedef Eigen::Matrix<double, 2, N> Matrix2##N; \
-typedef Eigen::Matrix<double, 3, N> Matrix3##N; \
-typedef Eigen::Matrix<double, 4, N> Matrix4##N; \
-typedef Eigen::Matrix<double, 5, N> Matrix5##N; \
-typedef Eigen::Matrix<double, 6, N> Matrix6##N; \
-typedef Eigen::Matrix<double, 7, N> Matrix7##N; \
-typedef Eigen::Matrix<double, 8, N> Matrix8##N; \
-typedef Eigen::Matrix<double, 9, N> Matrix9##N; \
+using Matrix##N = Eigen::Matrix<double, N, N>; \
+using Matrix1##N = Eigen::Matrix<double, 1, N>; \
+using Matrix2##N = Eigen::Matrix<double, 2, N>; \
+using Matrix3##N = Eigen::Matrix<double, 3, N>; \
+using Matrix4##N = Eigen::Matrix<double, 4, N>; \
+using Matrix5##N = Eigen::Matrix<double, 5, N>; \
+using Matrix6##N = Eigen::Matrix<double, 6, N>; \
+using Matrix7##N = Eigen::Matrix<double, 7, N>; \
+using Matrix8##N = Eigen::Matrix<double, 8, N>; \
+using Matrix9##N = Eigen::Matrix<double, 9, N>; \
static const Eigen::MatrixBase<Matrix##N>::IdentityReturnType I_##N##x##N = Matrix##N::Identity(); \
static const Eigen::MatrixBase<Matrix##N>::ConstantReturnType Z_##N##x##N = Matrix##N::Zero();
-GTSAM_MAKE_MATRIX_DEFS(1);
-GTSAM_MAKE_MATRIX_DEFS(2);
-GTSAM_MAKE_MATRIX_DEFS(3);
-GTSAM_MAKE_MATRIX_DEFS(4);
-GTSAM_MAKE_MATRIX_DEFS(5);
-GTSAM_MAKE_MATRIX_DEFS(6);
-GTSAM_MAKE_MATRIX_DEFS(7);
-GTSAM_MAKE_MATRIX_DEFS(8);
-GTSAM_MAKE_MATRIX_DEFS(9);
+GTSAM_MAKE_MATRIX_DEFS(1)
+GTSAM_MAKE_MATRIX_DEFS(2)
+GTSAM_MAKE_MATRIX_DEFS(3)
+GTSAM_MAKE_MATRIX_DEFS(4)
+GTSAM_MAKE_MATRIX_DEFS(5)
+GTSAM_MAKE_MATRIX_DEFS(6)
+GTSAM_MAKE_MATRIX_DEFS(7)
+GTSAM_MAKE_MATRIX_DEFS(8)
+GTSAM_MAKE_MATRIX_DEFS(9)
// Matrix expressions for accessing parts of matrices
typedef Eigen::Block<Matrix> SubMatrix;
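A small usage sketch of the aliases and constants these macros generate (behavior is unchanged by the typedef-to-using switch). The trailing semicolons on the macro invocations were presumably dropped because the macro body already ends in ';', so the extra one is a stray empty declaration that pedantic builds warn about:

#include <gtsam/base/Matrix.h>
#include <gtsam/base/Vector.h>

int main() {
  gtsam::Matrix3 A = gtsam::I_3x3;              // 3x3 identity from GTSAM_MAKE_MATRIX_DEFS(3)
  gtsam::Matrix46 B = gtsam::Matrix46::Zero();  // rectangular MatrixMN alias, M,N in 1..9
  gtsam::Vector6 v = gtsam::Z_6x1;              // zero vector from GTSAM_MAKE_VECTOR_DEFS(6)
  return (A(0, 0) > 0 && B.cols() == 6 && v.norm() == 0.0) ? 0 : 1;
}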


@@ -173,4 +173,4 @@ namespace gtsam {
 * @deprecated please use BOOST_CONCEPT_ASSERT and
 */
#define GTSAM_CONCEPT_TESTABLE_INST(T) template class gtsam::IsTestable<T>;
-#define GTSAM_CONCEPT_TESTABLE_TYPE(T) typedef gtsam::IsTestable<T> _gtsam_Testable_##T;
+#define GTSAM_CONCEPT_TESTABLE_TYPE(T) using _gtsam_Testable_##T = gtsam::IsTestable<T>;


@@ -48,18 +48,18 @@ static const Eigen::MatrixBase<Vector3>::ConstantReturnType Z_3x1 = Vector3::Zer
// Create handy typedefs and constants for vectors with N>3
// VectorN and Z_Nx1, for N=1..9
#define GTSAM_MAKE_VECTOR_DEFS(N) \
-typedef Eigen::Matrix<double, N, 1> Vector##N; \
+using Vector##N = Eigen::Matrix<double, N, 1>; \
static const Eigen::MatrixBase<Vector##N>::ConstantReturnType Z_##N##x1 = Vector##N::Zero();
-GTSAM_MAKE_VECTOR_DEFS(4);
-GTSAM_MAKE_VECTOR_DEFS(5);
-GTSAM_MAKE_VECTOR_DEFS(6);
-GTSAM_MAKE_VECTOR_DEFS(7);
-GTSAM_MAKE_VECTOR_DEFS(8);
-GTSAM_MAKE_VECTOR_DEFS(9);
-GTSAM_MAKE_VECTOR_DEFS(10);
-GTSAM_MAKE_VECTOR_DEFS(11);
-GTSAM_MAKE_VECTOR_DEFS(12);
+GTSAM_MAKE_VECTOR_DEFS(4)
+GTSAM_MAKE_VECTOR_DEFS(5)
+GTSAM_MAKE_VECTOR_DEFS(6)
+GTSAM_MAKE_VECTOR_DEFS(7)
+GTSAM_MAKE_VECTOR_DEFS(8)
+GTSAM_MAKE_VECTOR_DEFS(9)
+GTSAM_MAKE_VECTOR_DEFS(10)
+GTSAM_MAKE_VECTOR_DEFS(11)
+GTSAM_MAKE_VECTOR_DEFS(12)
typedef Eigen::VectorBlock<Vector> SubVector;
typedef Eigen::VectorBlock<const Vector> ConstSubVector;


@@ -32,7 +32,7 @@ void testDefaultChart(TestResult& result_,
const std::string& name_,
const T& value) {
-GTSAM_CONCEPT_TESTABLE_TYPE(T);
+GTSAM_CONCEPT_TESTABLE_TYPE(T)
typedef typename gtsam::DefaultChart<T> Chart;
typedef typename Chart::vector Vector;


@@ -158,9 +158,8 @@ void DepthFirstForestParallel(FOREST& forest, DATA& rootData,
// Typedefs
typedef typename FOREST::Node Node;
-tbb::task::spawn_root_and_wait(
internal::CreateRootTask<Node>(forest.roots(), rootData, visitorPre,
-visitorPost, problemSizeThreshold));
+visitorPost, problemSizeThreshold);
#else
DepthFirstForest(forest, rootData, visitorPre, visitorPost);
#endif


@@ -22,7 +22,7 @@
#include <boost/make_shared.hpp>
#ifdef GTSAM_USE_TBB
-#include <tbb/task.h> // tbb::task, tbb::task_list
+#include <tbb/task_group.h> // tbb::task_group
#include <tbb/scalable_allocator.h> // tbb::scalable_allocator
namespace gtsam {
@@ -34,7 +34,7 @@ namespace gtsam {
/* ************************************************************************* */
template<typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-class PreOrderTask : public tbb::task
+class PreOrderTask
{
public:
const boost::shared_ptr<NODE>& treeNode;
@@ -42,28 +42,30 @@ namespace gtsam {
VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost;
int problemSizeThreshold;
+tbb::task_group& tg;
bool makeNewTasks;
-bool isPostOrderPhase;
+// Keep track of order phase across multiple calls to the same functor
+mutable bool isPostOrderPhase;
PreOrderTask(const boost::shared_ptr<NODE>& treeNode, const boost::shared_ptr<DATA>& myData,
VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold,
-bool makeNewTasks = true)
+tbb::task_group& tg, bool makeNewTasks = true)
: treeNode(treeNode),
myData(myData),
visitorPre(visitorPre),
visitorPost(visitorPost),
problemSizeThreshold(problemSizeThreshold),
+tg(tg),
makeNewTasks(makeNewTasks),
isPostOrderPhase(false) {}
-tbb::task* execute() override
+void operator()() const
{
if(isPostOrderPhase)
{
// Run the post-order visitor since this task was recycled to run the post-order visitor
(void) visitorPost(treeNode, *myData);
-return nullptr;
}
else
{
@@ -71,14 +73,10 @@ namespace gtsam {
{
if(!treeNode->children.empty())
{
-// Allocate post-order task as a continuation
-isPostOrderPhase = true;
-recycle_as_continuation();
bool overThreshold = (treeNode->problemSize() >= problemSizeThreshold);
-tbb::task* firstChild = 0;
-tbb::task_list childTasks;
+// If we have child tasks, start subtasks and wait for them to complete
+tbb::task_group ctg;
for(const boost::shared_ptr<NODE>& child: treeNode->children)
{
// Process child in a subtask. Important: Run visitorPre before calling
@@ -86,37 +84,30 @@ namespace gtsam {
// allocated an extra child, this causes a TBB error.
boost::shared_ptr<DATA> childData = boost::allocate_shared<DATA>(
tbb::scalable_allocator<DATA>(), visitorPre(child, *myData));
-tbb::task* childTask =
-new (allocate_child()) PreOrderTask(child, childData, visitorPre, visitorPost,
-problemSizeThreshold, overThreshold);
-if (firstChild)
-childTasks.push_back(*childTask);
-else
-firstChild = childTask;
+ctg.run(PreOrderTask(child, childData, visitorPre, visitorPost,
+problemSizeThreshold, ctg, overThreshold));
}
-// If we have child tasks, start subtasks and wait for them to complete
-set_ref_count((int)treeNode->children.size());
-spawn(childTasks);
-return firstChild;
+ctg.wait();
+// Allocate post-order task as a continuation
+isPostOrderPhase = true;
+tg.run(*this);
}
else
{
// Run the post-order visitor in this task if we have no children
(void) visitorPost(treeNode, *myData);
-return nullptr;
}
}
else
{
// Process this node and its children in this task
processNodeRecursively(treeNode, *myData);
-return nullptr;
}
}
}
-void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData)
+void processNodeRecursively(const boost::shared_ptr<NODE>& node, DATA& myData) const
{
for(const boost::shared_ptr<NODE>& child: node->children)
{
@@ -131,7 +122,7 @@ namespace gtsam {
/* ************************************************************************* */
template<typename ROOTS, typename NODE, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-class RootTask : public tbb::task
+class RootTask
{
public:
const ROOTS& roots;
@@ -139,37 +130,30 @@ namespace gtsam {
VISITOR_PRE& visitorPre;
VISITOR_POST& visitorPost;
int problemSizeThreshold;
+tbb::task_group& tg;
RootTask(const ROOTS& roots, DATA& myData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost,
-int problemSizeThreshold) :
+int problemSizeThreshold, tbb::task_group& tg) :
roots(roots), myData(myData), visitorPre(visitorPre), visitorPost(visitorPost),
-problemSizeThreshold(problemSizeThreshold) {}
+problemSizeThreshold(problemSizeThreshold), tg(tg) {}
-tbb::task* execute() override
+void operator()() const
{
typedef PreOrderTask<NODE, DATA, VISITOR_PRE, VISITOR_POST> PreOrderTask;
// Create data and tasks for our children
-tbb::task_list tasks;
for(const boost::shared_ptr<NODE>& root: roots)
{
boost::shared_ptr<DATA> rootData = boost::allocate_shared<DATA>(tbb::scalable_allocator<DATA>(), visitorPre(root, myData));
-tasks.push_back(*new(allocate_child())
-PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold));
+tg.run(PreOrderTask(root, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
}
-// Set TBB ref count
-set_ref_count(1 + (int) roots.size());
-// Spawn tasks
-spawn_and_wait_for_all(tasks);
-// Return nullptr
-return nullptr;
}
};
template<typename NODE, typename ROOTS, typename DATA, typename VISITOR_PRE, typename VISITOR_POST>
-RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST>&
-CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
+void CreateRootTask(const ROOTS& roots, DATA& rootData, VISITOR_PRE& visitorPre, VISITOR_POST& visitorPost, int problemSizeThreshold)
{
typedef RootTask<ROOTS, NODE, DATA, VISITOR_PRE, VISITOR_POST> RootTask;
-return *new(tbb::task::allocate_root()) RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold);
+tbb::task_group tg;
+tg.run_and_wait(RootTask(roots, rootData, visitorPre, visitorPost, problemSizeThreshold, tg));
}
}
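The old implementation subclassed tbb::task and managed reference counts by hand, an API that oneTBB (2021.1+) removed; the replacement above expresses the same fork-join structure with tbb::task_group. A standalone sketch of the migration idiom (not GTSAM code, names hypothetical):

#include <tbb/task_group.h>
#include <vector>

struct Node { std::vector<Node> children; };

// Visit a tree with fork-join parallelism, in the spirit of PreOrderTask/RootTask above.
void process(const Node& n) {
  tbb::task_group ctg;              // child group, like 'ctg' in PreOrderTask
  for (const Node& c : n.children)
    ctg.run([&c] { process(c); });  // replaces allocate_child()/spawn()
  ctg.wait();                       // replaces set_ref_count()/wait_for_all()
  // post-order work on 'n' would go here
}

int main() {
  Node root{{Node{}, Node{}}};
  tbb::task_group tg;
  tg.run_and_wait([&] { process(root); });  // replaces tbb::task::spawn_root_and_wait()
  return 0;
}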


@@ -35,7 +35,7 @@ class GTSAM_EXPORT PinholeBaseK: public PinholeBase {
private:
-GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION);
+GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
// Get dimensions of calibration type at compile time
static const int DimK = FixedDimension<CALIBRATION>::value;


@@ -28,7 +28,7 @@ using namespace std;
namespace gtsam {
/** instantiate concept checks */
-GTSAM_CONCEPT_POSE_INST(Pose2);
+GTSAM_CONCEPT_POSE_INST(Pose2)
static const Rot2 R_PI_2(Rot2::fromCosSin(0., 1.));


@@ -30,7 +30,7 @@ using std::vector;
using Point3Pairs = vector<Point3Pair>;
/** instantiate concept checks */
-GTSAM_CONCEPT_POSE_INST(Pose3);
+GTSAM_CONCEPT_POSE_INST(Pose3)
/* ************************************************************************* */
Pose3::Pose3(const Pose2& pose2) :

@@ -63,6 +63,46 @@ Matrix6 Pose3::AdjointMap() const {
return adj;
}
/* ************************************************************************* */
// Calculate AdjointMap applied to xi_b, with Jacobians
Vector6 Pose3::Adjoint(const Vector6& xi_b, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_xib) const {
const Matrix6 Ad = AdjointMap();
// Jacobians
// D1 Ad_T(xi_b) = D1 Ad_T Ad_I(xi_b) = Ad_T * D1 Ad_I(xi_b) = Ad_T * ad_xi_b
// D2 Ad_T(xi_b) = Ad_T
// See docs/math.pdf for more details.
// In D1 calculation, we could be more efficient by writing it out, but do not
// for readability
if (H_pose) *H_pose = -Ad * adjointMap(xi_b);
if (H_xib) *H_xib = Ad;
return Ad * xi_b;
}
/* ************************************************************************* */
/// The dual version of Adjoint
Vector6 Pose3::AdjointTranspose(const Vector6& x, OptionalJacobian<6, 6> H_pose,
OptionalJacobian<6, 6> H_x) const {
const Matrix6 Ad = AdjointMap();
const Vector6 AdTx = Ad.transpose() * x;
// Jacobians
// See docs/math.pdf for more details.
if (H_pose) {
const auto w_T_hat = skewSymmetric(AdTx.head<3>()),
v_T_hat = skewSymmetric(AdTx.tail<3>());
*H_pose << w_T_hat, v_T_hat, //
/* */ v_T_hat, Z_3x3;
}
if (H_x) {
*H_x = Ad.transpose();
}
return AdTx;
}
/* ************************************************************************* */
Matrix6 Pose3::adjointMap(const Vector6& xi) {
Matrix3 w_hat = skewSymmetric(xi(0), xi(1), xi(2));

@@ -75,7 +115,7 @@ Matrix6 Pose3::adjointMap(const Vector6& xi) {
/* ************************************************************************* */
Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
-OptionalJacobian<6, 6> Hxi) {
+OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
if (Hxi) {
Hxi->setZero();
for (int i = 0; i < 6; ++i) {

@@ -86,12 +126,14 @@ Vector6 Pose3::adjoint(const Vector6& xi, const Vector6& y,
Hxi->col(i) = Gi * y;
}
}
-return adjointMap(xi) * y;
+const Matrix6& ad_xi = adjointMap(xi);
+if (H_y) *H_y = ad_xi;
+return ad_xi * y;
}
/* ************************************************************************* */
Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
-OptionalJacobian<6, 6> Hxi) {
+OptionalJacobian<6, 6> Hxi, OptionalJacobian<6, 6> H_y) {
if (Hxi) {
Hxi->setZero();
for (int i = 0; i < 6; ++i) {

@@ -102,7 +144,9 @@ Vector6 Pose3::adjointTranspose(const Vector6& xi, const Vector6& y,
Hxi->col(i) = GTi * y;
}
}
-return adjointMap(xi).transpose() * y;
+const Matrix6& adT_xi = adjointMap(xi).transpose();
+if (H_y) *H_y = adT_xi;
+return adT_xi * y;
}
/* ************************************************************************* */
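A short usage sketch of the new Jacobian-returning overloads added above (values arbitrary; it only shows the call shape and what the returned Jacobians are):

#include <gtsam/geometry/Pose3.h>
#include <iostream>

int main() {
  using namespace gtsam;
  const Pose3 T(Rot3::Ypr(0.3, -0.1, 0.2), Point3(1, 2, 3));
  const Vector6 xi_b = (Vector6() << 0.1, 0.2, 0.3, 1.0, 2.0, 3.0).finished();

  Matrix6 H_pose, H_xib;
  const Vector6 xi_s = T.Adjoint(xi_b, H_pose, H_xib);        // H_xib == T.AdjointMap()

  Matrix6 H_pose_T, H_x;
  const Vector6 w = T.AdjointTranspose(xi_b, H_pose_T, H_x);  // H_x == AdjointMap().transpose()

  std::cout << xi_s.transpose() << "\n" << w.transpose() << std::endl;
  return 0;
}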


@@ -145,15 +145,22 @@ public:
 * Calculate Adjoint map, transforming a twist in this pose's (i.e, body) frame to the world spatial frame
 * Ad_pose is 6*6 matrix that when applied to twist xi \f$ [R_x,R_y,R_z,T_x,T_y,T_z] \f$, returns Ad_pose(xi)
 */
-Matrix6 AdjointMap() const; /// FIXME Not tested - marked as incorrect
+Matrix6 AdjointMap() const;
/**
- * Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a body-fixed velocity, transforming it to the spatial frame
+ * Apply this pose's AdjointMap Ad_g to a twist \f$ \xi_b \f$, i.e. a
+ * body-fixed velocity, transforming it to the spatial frame
 * \f$ \xi^s = g*\xi^b*g^{-1} = Ad_g * \xi^b \f$
+ * Note that H_xib = AdjointMap()
 */
-Vector6 Adjoint(const Vector6& xi_b) const {
-return AdjointMap() * xi_b;
-} /// FIXME Not tested - marked as incorrect
+Vector6 Adjoint(const Vector6& xi_b,
+OptionalJacobian<6, 6> H_this = boost::none,
+OptionalJacobian<6, 6> H_xib = boost::none) const;
+/// The dual version of Adjoint
+Vector6 AdjointTranspose(const Vector6& x,
+OptionalJacobian<6, 6> H_this = boost::none,
+OptionalJacobian<6, 6> H_x = boost::none) const;
/**
 * Compute the [ad(w,v)] operator as defined in [Kobilarov09siggraph], pg 11

@@ -170,13 +177,14 @@ public:
 * and its inverse transpose in the discrete Euler Poincare' (DEP) operator.
 *
 */
-static Matrix6 adjointMap(const Vector6 &xi);
+static Matrix6 adjointMap(const Vector6& xi);
/**
 * Action of the adjointMap on a Lie-algebra vector y, with optional derivatives
 */
-static Vector6 adjoint(const Vector6 &xi, const Vector6 &y,
-OptionalJacobian<6, 6> Hxi = boost::none);
+static Vector6 adjoint(const Vector6& xi, const Vector6& y,
+OptionalJacobian<6, 6> Hxi = boost::none,
+OptionalJacobian<6, 6> H_y = boost::none);
// temporary fix for wrappers until case issue is resolved
static Matrix6 adjointMap_(const Vector6 &xi) { return adjointMap(xi);}

@@ -186,7 +194,8 @@ public:
 * The dual version of adjoint action, acting on the dual space of the Lie-algebra vector space.
 */
static Vector6 adjointTranspose(const Vector6& xi, const Vector6& y,
-OptionalJacobian<6, 6> Hxi = boost::none);
+OptionalJacobian<6, 6> Hxi = boost::none,
+OptionalJacobian<6, 6> H_y = boost::none);
/// Derivative of Expmap
static Matrix6 ExpmapDerivative(const Vector6& xi);


@@ -72,5 +72,5 @@ private:
/** Pose Concept macros */
#define GTSAM_CONCEPT_POSE_INST(T) template class gtsam::PoseConcept<T>;
-#define GTSAM_CONCEPT_POSE_TYPE(T) typedef gtsam::PoseConcept<T> _gtsam_PoseConcept##T;
+#define GTSAM_CONCEPT_POSE_TYPE(T) using _gtsam_PoseConcept##T = gtsam::PoseConcept<T>;


@@ -473,6 +473,9 @@ class Pose3 {
Vector logmap(const gtsam::Pose3& pose);
Matrix AdjointMap() const;
Vector Adjoint(Vector xi) const;
+Vector AdjointTranspose(Vector xi) const;
+static Matrix adjointMap(Vector xi);
+static Vector adjoint(Vector xi, Vector y);
static Matrix adjointMap_(Vector xi);
static Vector adjoint_(Vector xi, Vector y);
static Vector adjointTranspose(Vector xi, Vector y);


@@ -145,6 +145,81 @@ TEST(Pose3, Adjoint_full)
EXPECT(assert_equal(expected3, Pose3::Expmap(xiprime3), 1e-6));
}
/* ************************************************************************* */
// Check Adjoint numerical derivatives
TEST(Pose3, Adjoint_jacobians)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
// Check evaluation sanity check
EQUALITY(static_cast<gtsam::Vector>(T.AdjointMap() * xi), T.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T2.AdjointMap() * xi), T2.Adjoint(xi));
EQUALITY(static_cast<gtsam::Vector>(T3.AdjointMap() * xi), T3.Adjoint(xi));
// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> Adjoint_proxy =
[&](const Pose3& T, const Vector6& xi) { return T.Adjoint(xi); };
T.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
T2.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T2, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
T3.Adjoint(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(Adjoint_proxy, T3, xi);
expectedH2 = numericalDerivative22(Adjoint_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1));
EXPECT(assert_equal(expectedH2, actualH2));
}
/* ************************************************************************* */
// Check AdjointTranspose and jacobians
TEST(Pose3, AdjointTranspose)
{
Vector6 xi = (Vector6() << 0.1, 1.2, 2.3, 3.1, 1.4, 4.5).finished();
// Check evaluation
EQUALITY(static_cast<Vector>(T.AdjointMap().transpose() * xi),
T.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T2.AdjointMap().transpose() * xi),
T2.AdjointTranspose(xi));
EQUALITY(static_cast<Vector>(T3.AdjointMap().transpose() * xi),
T3.AdjointTranspose(xi));
// Check jacobians
Matrix6 actualH1, actualH2, expectedH1, expectedH2;
std::function<Vector6(const Pose3&, const Vector6&)> AdjointTranspose_proxy =
[&](const Pose3& T, const Vector6& xi) {
return T.AdjointTranspose(xi);
};
T.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
T2.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T2, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T2, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
T3.AdjointTranspose(xi, actualH1, actualH2);
expectedH1 = numericalDerivative21(AdjointTranspose_proxy, T3, xi);
expectedH2 = numericalDerivative22(AdjointTranspose_proxy, T3, xi);
EXPECT(assert_equal(expectedH1, actualH1, 1e-8));
EXPECT(assert_equal(expectedH2, actualH2));
}
/* ************************************************************************* */
// assert that T*wedge(xi)*T^-1 is equal to wedge(Ad_T(xi))
TEST(Pose3, Adjoint_hat)
@@ -837,16 +912,20 @@ Vector6 testDerivAdjoint(const Vector6& xi, const Vector6& v) {
}
TEST( Pose3, adjoint) {
-Vector expected = testDerivAdjoint(screwPose3::xi, screwPose3::xi);
+Vector6 v = (Vector6() << 1, 2, 3, 4, 5, 6).finished();
+Vector expected = testDerivAdjoint(screwPose3::xi, v);
-Matrix actualH;
-Vector actual = Pose3::adjoint(screwPose3::xi, screwPose3::xi, actualH);
-Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
-testDerivAdjoint, screwPose3::xi, screwPose3::xi, 1e-5);
+Matrix actualH1, actualH2;
+Vector actual = Pose3::adjoint(screwPose3::xi, v, actualH1, actualH2);
+Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
+testDerivAdjoint, screwPose3::xi, v, 1e-5);
+Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
+testDerivAdjoint, screwPose3::xi, v, 1e-5);
EXPECT(assert_equal(expected,actual,1e-5));
-EXPECT(assert_equal(numericalH,actualH,1e-5));
+EXPECT(assert_equal(numericalH1,actualH1,1e-5));
+EXPECT(assert_equal(numericalH2,actualH2,1e-5));
}
/* ************************************************************************* */
@@ -859,14 +938,17 @@ TEST( Pose3, adjointTranspose) {
Vector v = (Vector(6) << 0.04, 0.05, 0.06, 4.0, 5.0, 6.0).finished();
Vector expected = testDerivAdjointTranspose(xi, v);
-Matrix actualH;
-Vector actual = Pose3::adjointTranspose(xi, v, actualH);
-Matrix numericalH = numericalDerivative21<Vector6, Vector6, Vector6>(
+Matrix actualH1, actualH2;
+Vector actual = Pose3::adjointTranspose(xi, v, actualH1, actualH2);
+Matrix numericalH1 = numericalDerivative21<Vector6, Vector6, Vector6>(
+testDerivAdjointTranspose, xi, v, 1e-5);
+Matrix numericalH2 = numericalDerivative22<Vector6, Vector6, Vector6>(
testDerivAdjointTranspose, xi, v, 1e-5);
EXPECT(assert_equal(expected,actual,1e-15));
-EXPECT(assert_equal(numericalH,actualH,1e-5));
+EXPECT(assert_equal(numericalH1,actualH1,1e-5));
+EXPECT(assert_equal(numericalH2,actualH2,1e-5));
}
/* ************************************************************************* */


@@ -110,7 +110,7 @@ class ClusterTree {
typedef sharedCluster sharedNode;
/** concept check */
-GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
+GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)
protected:
FastVector<sharedNode> roots_;


@@ -36,17 +36,17 @@ namespace gtsam {
// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
// before creating ordering.
VariableIndex computedVariableIndex(asDerived());
-return eliminateSequential(function, computedVariableIndex, orderingType);
+return eliminateSequential(orderingType, function, computedVariableIndex);
}
else {
// Compute an ordering and call this function again. We are guaranteed to have a
// VariableIndex already here because we computed one if needed in the previous 'if' block.
if (orderingType == Ordering::METIS) {
Ordering computedOrdering = Ordering::Metis(asDerived());
-return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
+return eliminateSequential(computedOrdering, function, variableIndex);
} else {
Ordering computedOrdering = Ordering::Colamd(*variableIndex);
-return eliminateSequential(computedOrdering, function, variableIndex, orderingType);
+return eliminateSequential(computedOrdering, function, variableIndex);
}
}
}
@@ -78,29 +78,31 @@ namespace gtsam {
}
/* ************************************************************************* */
-template<class FACTORGRAPH>
-boost::shared_ptr<typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
+template <class FACTORGRAPH>
+boost::shared_ptr<
+typename EliminateableFactorGraph<FACTORGRAPH>::BayesTreeType>
EliminateableFactorGraph<FACTORGRAPH>::eliminateMultifrontal(
OptionalOrderingType orderingType, const Eliminate& function,
-OptionalVariableIndex variableIndex) const
-{
-if(!variableIndex) {
-// If no VariableIndex provided, compute one and call this function again IMPORTANT: we check
-// for no variable index first so that it's always computed if we need to call COLAMD because
-// no Ordering is provided. When removing optional from VariableIndex, create VariableIndex
-// before creating ordering.
+OptionalVariableIndex variableIndex) const {
+if (!variableIndex) {
+// If no VariableIndex provided, compute one and call this function again
+// IMPORTANT: we check for no variable index first so that it's always
+// computed if we need to call COLAMD because no Ordering is provided.
+// When removing optional from VariableIndex, create VariableIndex before
+// creating ordering.
VariableIndex computedVariableIndex(asDerived());
-return eliminateMultifrontal(function, computedVariableIndex, orderingType);
-}
-else {
-// Compute an ordering and call this function again. We are guaranteed to have a
-// VariableIndex already here because we computed one if needed in the previous 'if' block.
+return eliminateMultifrontal(orderingType, function,
+computedVariableIndex);
+} else {
+// Compute an ordering and call this function again. We are guaranteed to
+// have a VariableIndex already here because we computed one if needed in
+// the previous 'if' block.
if (orderingType == Ordering::METIS) {
Ordering computedOrdering = Ordering::Metis(asDerived());
-return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
+return eliminateMultifrontal(computedOrdering, function, variableIndex);
} else {
Ordering computedOrdering = Ordering::Colamd(*variableIndex);
-return eliminateMultifrontal(computedOrdering, function, variableIndex, orderingType);
+return eliminateMultifrontal(computedOrdering, function, variableIndex);
}
}
}
@@ -273,7 +275,7 @@ namespace gtsam {
else
{
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
-return factorGraph->eliminateSequential(function);
+return factorGraph->eliminateSequential(Ordering::COLAMD, function);
}
}
}
@@ -340,7 +342,7 @@ namespace gtsam {
else
{
// No ordering was provided for the unmarginalized variables, so order them with COLAMD.
-return factorGraph->eliminateMultifrontal(function);
+return factorGraph->eliminateMultifrontal(Ordering::COLAMD, function);
}
}
}
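These hunks make the no-ordering code paths pass an explicit ordering type instead of the removed trailing orderingType argument; public call sites are unchanged. A hedged usage sketch on a trivial Gaussian factor graph (graph contents arbitrary, for illustration only):

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>

int main() {
  gtsam::GaussianFactorGraph gfg;
  gfg.add(0, gtsam::I_3x3, gtsam::Vector3(1.0, 2.0, 3.0), gtsam::noiseModel::Unit::Create(3));

  // Both calls order the variables with COLAMD; the second spells it out.
  auto bayesNet1 = gfg.eliminateSequential();
  auto bayesNet2 = gfg.eliminateSequential(gtsam::Ordering::COLAMD);
  return (bayesNet1 && bayesNet2) ? 0 : 1;
}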


@@ -288,6 +288,7 @@ namespace gtsam {
FactorGraphType& asDerived() { return static_cast<FactorGraphType&>(*this); }
public:
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
/** \deprecated ordering and orderingType shouldn't both be specified */
boost::shared_ptr<BayesNetType> eliminateSequential(
const Ordering& ordering,

@@ -339,6 +340,7 @@ namespace gtsam {
OptionalVariableIndex variableIndex = boost::none) const {
return marginalMultifrontalBayesTree(variables, function, variableIndex);
}
+#endif
};
}


@@ -81,7 +81,7 @@ namespace gtsam {
protected:
/** concept check */
-GTSAM_CONCEPT_TESTABLE_TYPE(FactorType);
+GTSAM_CONCEPT_TESTABLE_TYPE(FactorType)
FastVector<sharedNode> roots_;
FastVector<sharedFactor> remainingFactors_;


@@ -19,7 +19,6 @@
 */
#include <gtsam/linear/GaussianFactorGraph.h>
-#include <gtsam/linear/VectorValues.h>
#include <gtsam/linear/GaussianBayesTree.h>
#include <gtsam/linear/GaussianEliminationTree.h>
#include <gtsam/linear/GaussianJunctionTree.h>
@@ -290,10 +289,11 @@ namespace gtsam {
return blocks;
}
-/* ************************************************************************* */
+/* ************************************************************************ */
VectorValues GaussianFactorGraph::optimize(const Eliminate& function) const {
gttic(GaussianFactorGraph_optimize);
-return BaseEliminateable::eliminateMultifrontal(function)->optimize();
+return BaseEliminateable::eliminateMultifrontal(Ordering::COLAMD, function)
+->optimize();
}
/* ************************************************************************* */
@@ -504,10 +504,32 @@ namespace gtsam {
}
/* ************************************************************************* */
-/** \deprecated */
-VectorValues GaussianFactorGraph::optimize(boost::none_t,
-const Eliminate& function) const {
-return optimize(function);
+void GaussianFactorGraph::printErrors(
+const VectorValues& values, const std::string& str,
+const KeyFormatter& keyFormatter,
+const std::function<bool(const Factor* /*factor*/,
+double /*whitenedError*/, size_t /*index*/)>&
+printCondition) const {
cout << str << "size: " << size() << endl << endl;
for (size_t i = 0; i < (*this).size(); i++) {
const sharedFactor& factor = (*this)[i];
const double errorValue =
(factor != nullptr ? (*this)[i]->error(values) : .0);
if (!printCondition(factor.get(), errorValue, i))
continue; // User-provided filter did not pass
stringstream ss;
ss << "Factor " << i << ": ";
if (factor == nullptr) {
cout << "nullptr"
<< "\n";
} else {
factor->print(ss.str(), keyFormatter);
cout << "error = " << errorValue << "\n";
}
cout << endl; // only one "endl" at end might be faster, \n for each
// factor
}
}
} // namespace gtsam
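A usage sketch of the new diagnostic (graph contents arbitrary); the optional predicate lets callers print only factors whose error exceeds a threshold. Written as an assumption-laden illustration, not code from the PR:

#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/VectorValues.h>

int main() {
  using namespace gtsam;
  GaussianFactorGraph gfg;
  gfg.add(0, I_3x3, Vector3(1.0, 2.0, 3.0), noiseModel::Unit::Create(3));

  VectorValues x;
  x.insert(0, Vector3(0.0, 0.0, 0.0));

  gfg.printErrors(x);  // prints every factor with its error at x
  gfg.printErrors(x, "large errors: ", DefaultKeyFormatter,
                  [](const Factor*, double error, size_t) { return error > 0.25; });
  return 0;
}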


@@ -21,12 +21,13 @@
#pragma once
-#include <gtsam/inference/FactorGraph.h>
#include <gtsam/inference/EliminateableFactorGraph.h>
-#include <gtsam/linear/GaussianFactor.h>
-#include <gtsam/linear/JacobianFactor.h>
-#include <gtsam/linear/HessianFactor.h>
+#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/Errors.h> // Included here instead of fw-declared so we can use Errors::iterator
+#include <gtsam/linear/GaussianFactor.h>
+#include <gtsam/linear/HessianFactor.h>
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>
namespace gtsam {
@@ -375,6 +376,14 @@ namespace gtsam {
/** In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const VectorValues& x, const Errors::iterator& e) const;
void printErrors(
const VectorValues& x,
const std::string& str = "GaussianFactorGraph: ",
const KeyFormatter& keyFormatter = DefaultKeyFormatter,
const std::function<bool(const Factor* /*factor*/,
double /*whitenedError*/, size_t /*index*/)>&
printCondition =
[](const Factor*, double, size_t) { return true; }) const;
/// @}
private:
@@ -387,9 +396,14 @@ namespace gtsam {
public:
+#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
/** \deprecated */
VectorValues optimize(boost::none_t,
-const Eliminate& function = EliminationTraitsType::DefaultEliminate) const;
+const Eliminate& function =
+EliminationTraitsType::DefaultEliminate) const {
+return optimize(function);
+}
+#endif
};


@@ -302,6 +302,7 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const;
void printKeys(string s) const;
+gtsam::KeyVector& keys() const;
bool equals(const gtsam::GaussianFactor& lf, double tol) const;
size_t size() const;
Vector unweighted_error(const gtsam::VectorValues& c) const;
@@ -401,6 +402,7 @@ class GaussianFactorGraph {
// error and probability
double error(const gtsam::VectorValues& c) const;
double probPrime(const gtsam::VectorValues& c) const;
+void printErrors(const gtsam::VectorValues& c, string str = "GaussianFactorGraph: ", const gtsam::KeyFormatter& keyFormatter = gtsam::DefaultKeyFormatter) const;
gtsam::GaussianFactorGraph clone() const;
gtsam::GaussianFactorGraph negate() const;
@@ -513,9 +515,9 @@ virtual class GaussianBayesNet {
size_t size() const;
// FactorGraph derived interface
-// size_t size() const;
gtsam::GaussianConditional* at(size_t idx) const;
gtsam::KeySet keys() const;
+gtsam::KeyVector keyVector() const;
bool exists(size_t idx) const;
void saveGraph(const string& s) const;


@@ -39,14 +39,14 @@ using namespace gtsam::serializationTestHelpers;
/* ************************************************************************* */
// Export Noisemodels
// See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
/* ************************************************************************* */
// example noise models
@@ -129,9 +129,9 @@ TEST (Serialization, SharedDiagonal_noiseModels) {
/* Create GUIDs for factors */
/* ************************************************************************* */
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
-BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional");
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
+BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")
/* ************************************************************************* */
TEST (Serialization, linear_factors) {


@@ -257,5 +257,5 @@ std::ostream& operator<<(std::ostream& os, const CombinedImuFactor& f) {
/// namespace gtsam
/// Boost serialization export definition for derived class
-BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_IMPLEMENT(gtsam::PreintegrationCombinedParams)


@@ -353,4 +353,4 @@ struct traits<CombinedImuFactor> : public Testable<CombinedImuFactor> {};
} // namespace gtsam
/// Add Boost serialization export key (declaration) for derived class
-BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams);
+BOOST_CLASS_EXPORT_KEY(gtsam::PreintegrationCombinedParams)


@@ -88,6 +88,8 @@ virtual class PreintegratedRotationParams {
virtual class PreintegrationParams : gtsam::PreintegratedRotationParams {
PreintegrationParams(Vector n_gravity);
+gtsam::Vector n_gravity;
static gtsam::PreintegrationParams* MakeSharedD(double g);
static gtsam::PreintegrationParams* MakeSharedU(double g);
static gtsam::PreintegrationParams* MakeSharedD(); // default g = 9.81


@@ -66,7 +66,7 @@ TEST( Rot3AttitudeFactor, Constructor ) {
/* ************************************************************************* */
// Export Noisemodels
// See http://www.boost.org/doc/libs/1_32_0/libs/serialization/doc/special.html
-BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic);
+BOOST_CLASS_EXPORT(gtsam::noiseModel::Isotropic)
/* ************************************************************************* */
TEST(Rot3AttitudeFactor, Serialization) {


@@ -72,7 +72,7 @@ TEST( GPSFactor, Constructor ) {
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,Pose3>(
-std::bind(&GPSFactor::evaluateError, &factor, _1, boost::none), T);
+std::bind(&GPSFactor::evaluateError, &factor, std::placeholders::_1, boost::none), T);
// Use the factor to calculate the derivative
Matrix actualH;
@@ -101,7 +101,7 @@ TEST( GPSFactor2, Constructor ) {
// Calculate numerical derivatives
Matrix expectedH = numericalDerivative11<Vector,NavState>(
-std::bind(&GPSFactor2::evaluateError, &factor, _1, boost::none), T);
+std::bind(&GPSFactor2::evaluateError, &factor, std::placeholders::_1, boost::none), T);
// Use the factor to calculate the derivative
Matrix actualH;
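These tests switched from Boost's globally visible bind placeholders to std::bind, whose placeholders live in namespace std::placeholders and are not injected into the global namespace. A tiny standalone illustration (function name hypothetical):

#include <functional>
#include <iostream>

static double evaluateError(double x, double scale) { return scale * x; }

int main() {
  using std::placeholders::_1;  // without this (or full qualification), '_1' does not resolve
  auto f = std::bind(&evaluateError, _1, 2.0);
  std::cout << f(3.0) << std::endl;  // prints 6
  return 0;
}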


@@ -31,16 +31,16 @@ using namespace gtsam;
using namespace gtsam::serializationTestHelpers;
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained,
-"gtsam_noiseModel_Constrained");
+"gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal,
-"gtsam_noiseModel_Diagonal");
+"gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian,
-"gtsam_noiseModel_Gaussian");
+"gtsam_noiseModel_Gaussian")
-BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit");
+BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,
-"gtsam_noiseModel_Isotropic");
+"gtsam_noiseModel_Isotropic")
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel");
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
-BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal");
+BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
template <typename P>
P getPreintegratedMeasurements() {

View File

@ -64,7 +64,7 @@ TEST( MagFactor, unrotate ) {
Point3 expected(22735.5, 314.502, 44202.5); Point3 expected(22735.5, 314.502, 44202.5);
EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1)); EXPECT( assert_equal(expected, MagFactor::unrotate(theta,nM,H),1e-1));
EXPECT( assert_equal(numericalDerivative11<Point3,Rot2> // EXPECT( assert_equal(numericalDerivative11<Point3,Rot2> //
(std::bind(&MagFactor::unrotate, _1, nM, none), theta), H, 1e-6)); (std::bind(&MagFactor::unrotate, std::placeholders::_1, nM, none), theta), H, 1e-6));
} }
// ************************************************************************* // *************************************************************************
@ -76,35 +76,35 @@ TEST( MagFactor, Factors ) {
MagFactor f(1, measured, s, dir, bias, model); MagFactor f(1, measured, s, dir, bias, model);
EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5)); EXPECT( assert_equal(Z_3x1,f.evaluateError(theta,H1),1e-5));
EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> // EXPECT( assert_equal((Matrix)numericalDerivative11<Vector,Rot2> //
(std::bind(&MagFactor::evaluateError, &f, _1, none), theta), H1, 1e-7)); (std::bind(&MagFactor::evaluateError, &f, std::placeholders::_1, none), theta), H1, 1e-7));
// MagFactor1 // MagFactor1
MagFactor1 f1(1, measured, s, dir, bias, model); MagFactor1 f1(1, measured, s, dir, bias, model);
EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5)); EXPECT( assert_equal(Z_3x1,f1.evaluateError(nRb,H1),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> // EXPECT( assert_equal(numericalDerivative11<Vector,Rot3> //
(std::bind(&MagFactor1::evaluateError, &f1, _1, none), nRb), H1, 1e-7)); (std::bind(&MagFactor1::evaluateError, &f1, std::placeholders::_1, none), nRb), H1, 1e-7));
// MagFactor2 // MagFactor2
MagFactor2 f2(1, 2, measured, nRb, model); MagFactor2 f2(1, 2, measured, nRb, model);
EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5)); EXPECT( assert_equal(Z_3x1,f2.evaluateError(scaled,bias,H1,H2),1e-5));
EXPECT( assert_equal(numericalDerivative11<Vector,Point3> // EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
(std::bind(&MagFactor2::evaluateError, &f2, _1, bias, none, none), scaled),// (std::bind(&MagFactor2::evaluateError, &f2, std::placeholders::_1, bias, none, none), scaled),//
H1, 1e-7)); H1, 1e-7));
EXPECT( assert_equal(numericalDerivative11<Vector,Point3> // EXPECT( assert_equal(numericalDerivative11<Vector,Point3> //
(std::bind(&MagFactor2::evaluateError, &f2, scaled, _1, none, none), bias),// (std::bind(&MagFactor2::evaluateError, &f2, scaled, std::placeholders::_1, none, none), bias),//
H2, 1e-7)); H2, 1e-7));
// MagFactor2 // MagFactor2
MagFactor3 f3(1, 2, 3, measured, nRb, model); MagFactor3 f3(1, 2, 3, measured, nRb, model);
EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5)); EXPECT(assert_equal(Z_3x1,f3.evaluateError(s,dir,bias,H1,H2,H3),1e-5));
EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> // EXPECT(assert_equal((Matrix)numericalDerivative11<Vector,double> //
(std::bind(&MagFactor3::evaluateError, &f3, _1, dir, bias, none, none, none), s),// (std::bind(&MagFactor3::evaluateError, &f3, std::placeholders::_1, dir, bias, none, none, none), s),//
H1, 1e-7)); H1, 1e-7));
EXPECT(assert_equal(numericalDerivative11<Vector,Unit3> // EXPECT(assert_equal(numericalDerivative11<Vector,Unit3> //
(std::bind(&MagFactor3::evaluateError, &f3, s, _1, bias, none, none, none), dir),// (std::bind(&MagFactor3::evaluateError, &f3, s, std::placeholders::_1, bias, none, none, none), dir),//
H2, 1e-7)); H2, 1e-7));
EXPECT(assert_equal(numericalDerivative11<Vector,Point3> // EXPECT(assert_equal(numericalDerivative11<Vector,Point3> //
(std::bind(&MagFactor3::evaluateError, &f3, s, dir, _1, none, none, none), bias),// (std::bind(&MagFactor3::evaluateError, &f3, s, dir, std::placeholders::_1, none, none, none), bias),//
H3, 1e-7)); H3, 1e-7));
} }

View File

@ -101,4 +101,4 @@ private:
} }
}; };
}; }

View File

@ -246,6 +246,18 @@ struct apply_compose {
return x.compose(y, H1, H2); return x.compose(y, H1, H2);
} }
}; };
template <>
struct apply_compose<double> {
double operator()(const double& x, const double& y,
OptionalJacobian<1, 1> H1 = boost::none,
OptionalJacobian<1, 1> H2 = boost::none) const {
if (H1) H1->setConstant(y);
if (H2) H2->setConstant(x);
return x * y;
}
};
} }
// Global methods: // Global methods:
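With the specialization above, multiplying two scalar expressions falls back to ordinary multiplication, and the Jacobians follow the product rule: d(xy)/dx = y and d(xy)/dy = x. A hedged usage sketch (editorial; keys and values are illustrative):

#include <gtsam/nonlinear/expressions.h>
#include <gtsam/nonlinear/Values.h>
using namespace gtsam;

void demoScalarCompose() {
  Values values;
  values.insert(1, 2.0);  // x
  values.insert(2, 3.0);  // y
  Double_ x(Key(1)), y(Key(2));
  Double_ xy = x * y;                       // dispatches to apply_compose<double>
  const double product = xy.value(values);  // 6.0; the Jacobians would be 3 and 2
  (void)product;
}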

View File

@ -142,8 +142,9 @@ class GTSAM_EXPORT GncOptimizer {
* provides an extra interface for the user to initialize the weights * provides an extra interface for the user to initialize the weights
* */ * */
void setWeights(const Vector w) { void setWeights(const Vector w) {
if(w.size() != nfg_.size()){ if (size_t(w.size()) != nfg_.size()) {
throw std::runtime_error("GncOptimizer::setWeights: the number of specified weights" throw std::runtime_error(
"GncOptimizer::setWeights: the number of specified weights"
" does not match the size of the factor graph."); " does not match the size of the factor graph.");
} }
weights_ = w; weights_ = w;
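A hedged usage sketch of the interface checked above (editorial; assumes an existing graph and initial estimate, and uses uniform initial weights for illustration):

#include <gtsam/nonlinear/GncOptimizer.h>
#include <gtsam/nonlinear/LevenbergMarquardtParams.h>
using namespace gtsam;

Values robustSolve(const NonlinearFactorGraph& graph, const Values& initial) {
  GncParams<LevenbergMarquardtParams> gncParams;
  GncOptimizer<GncParams<LevenbergMarquardtParams> > gnc(graph, initial, gncParams);
  gnc.setWeights(Vector::Ones(graph.size()));  // must match the number of factors
  return gnc.optimize();
}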

View File

@ -80,11 +80,15 @@ Marginals::Marginals(const GaussianFactorGraph& graph, const VectorValues& solut
/* ************************************************************************* */ /* ************************************************************************* */
void Marginals::computeBayesTree() { void Marginals::computeBayesTree() {
// The default ordering to use.
const Ordering::OrderingType defaultOrderingType = Ordering::COLAMD;
// Compute BayesTree // Compute BayesTree
if(factorization_ == CHOLESKY) if (factorization_ == CHOLESKY)
bayesTree_ = *graph_.eliminateMultifrontal(EliminatePreferCholesky); bayesTree_ = *graph_.eliminateMultifrontal(defaultOrderingType,
else if(factorization_ == QR) EliminatePreferCholesky);
bayesTree_ = *graph_.eliminateMultifrontal(EliminateQR); else if (factorization_ == QR)
bayesTree_ =
*graph_.eliminateMultifrontal(defaultOrderingType, EliminateQR);
} }
/* ************************************************************************* */ /* ************************************************************************* */
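For context, a hedged usage sketch (editorial; graph, solution and key are assumed to exist). With the change above, the multifrontal elimination now explicitly requests a COLAMD ordering:

#include <gtsam/nonlinear/Marginals.h>
using namespace gtsam;

Matrix covarianceOf(const NonlinearFactorGraph& graph, const Values& solution, Key key) {
  Marginals marginals(graph, solution);      // CHOLESKY factorization by default
  return marginals.marginalCovariance(key);  // e.g. 6x6 for a Pose3 variable
}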

View File

@ -131,6 +131,7 @@ protected:
void computeBayesTree(const Ordering& ordering); void computeBayesTree(const Ordering& ordering);
public: public:
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
/** \deprecated argument order changed due to removing boost::optional<Ordering> */ /** \deprecated argument order changed due to removing boost::optional<Ordering> */
Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization, Marginals(const NonlinearFactorGraph& graph, const Values& solution, Factorization factorization,
const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {} const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
@ -142,6 +143,7 @@ public:
/** \deprecated argument order changed due to removing boost::optional<Ordering> */ /** \deprecated argument order changed due to removing boost::optional<Ordering> */
Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization, Marginals(const GaussianFactorGraph& graph, const VectorValues& solution, Factorization factorization,
const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {} const Ordering& ordering) : Marginals(graph, solution, ordering, factorization) {}
#endif
}; };

View File

@ -149,7 +149,7 @@ boost::tuple<V, int> nonlinearConjugateGradient(const S &system,
const V &initial, const NonlinearOptimizerParams &params, const V &initial, const NonlinearOptimizerParams &params,
const bool singleIteration, const bool gradientDescent = false) { const bool singleIteration, const bool gradientDescent = false) {
// GTSAM_CONCEPT_MANIFOLD_TYPE(V); // GTSAM_CONCEPT_MANIFOLD_TYPE(V)
size_t iteration = 0; size_t iteration = 0;

View File

@ -219,7 +219,6 @@ protected:
X value_; /// fixed value for variable X value_; /// fixed value for variable
GTSAM_CONCEPT_MANIFOLD_TYPE(X) GTSAM_CONCEPT_MANIFOLD_TYPE(X)
GTSAM_CONCEPT_TESTABLE_TYPE(X) GTSAM_CONCEPT_TESTABLE_TYPE(X)
public: public:

View File

@ -265,6 +265,7 @@ namespace gtsam {
public: public:
#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V41
/** \deprecated */ /** \deprecated */
boost::shared_ptr<HessianFactor> linearizeToHessianFactor( boost::shared_ptr<HessianFactor> linearizeToHessianFactor(
const Values& values, boost::none_t, const Dampen& dampen = nullptr) const const Values& values, boost::none_t, const Dampen& dampen = nullptr) const
@ -274,6 +275,7 @@ namespace gtsam {
Values updateCholesky(const Values& values, boost::none_t, Values updateCholesky(const Values& values, boost::none_t,
const Dampen& dampen = nullptr) const const Dampen& dampen = nullptr) const
{return updateCholesky(values, dampen);} {return updateCholesky(values, dampen);}
#endif
}; };

View File

@ -147,11 +147,13 @@ VectorValues NonlinearOptimizer::solve(const GaussianFactorGraph& gfg,
} else if (params.isSequential()) { } else if (params.isSequential()) {
// Sequential QR or Cholesky (decided by params.getEliminationFunction()) // Sequential QR or Cholesky (decided by params.getEliminationFunction())
if (params.ordering) if (params.ordering)
delta = gfg.eliminateSequential(*params.ordering, params.getEliminationFunction(), delta = gfg.eliminateSequential(*params.ordering,
boost::none, params.orderingType)->optimize(); params.getEliminationFunction())
->optimize();
else else
delta = gfg.eliminateSequential(params.getEliminationFunction(), boost::none, delta = gfg.eliminateSequential(params.orderingType,
params.orderingType)->optimize(); params.getEliminationFunction())
->optimize();
} else if (params.isIterative()) { } else if (params.isIterative()) {
// Conjugate Gradient -> needs params.iterativeParams // Conjugate Gradient -> needs params.iterativeParams
if (!params.iterativeParams) if (!params.iterativeParams)
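A hedged sketch of how the two sequential branches above are reached from user code (editorial; graph and initial are assumed, and METIS is only an illustrative choice):

#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
using namespace gtsam;

Values solveSequential(const NonlinearFactorGraph& graph, const Values& initial) {
  LevenbergMarquardtParams params;
  params.linearSolverType = NonlinearOptimizerParams::SEQUENTIAL_CHOLESKY;
  params.orderingType = Ordering::METIS;        // used when params.ordering is not set
  // params.ordering = Ordering::Colamd(graph); // alternatively, fix the ordering explicitly
  return LevenbergMarquardtOptimizer(graph, initial, params).optimize();
}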

View File

@ -36,7 +36,6 @@ namespace gtsam {
*/ */
JacobianFactor linearizeNumerically(const NoiseModelFactor& factor, JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
const Values& values, double delta = 1e-5) { const Values& values, double delta = 1e-5) {
// We will fill a vector of key/Jacobians pairs (a map would sort) // We will fill a vector of key/Jacobians pairs (a map would sort)
std::vector<std::pair<Key, Matrix> > jacobians; std::vector<std::pair<Key, Matrix> > jacobians;
@ -46,24 +45,24 @@ JacobianFactor linearizeNumerically(const NoiseModelFactor& factor,
// Loop over all variables // Loop over all variables
const double one_over_2delta = 1.0 / (2.0 * delta); const double one_over_2delta = 1.0 / (2.0 * delta);
for(Key key: factor) { for (Key key : factor) {
// Compute central differences using the values struct. // Compute central differences using the values struct.
VectorValues dX = values.zeroVectors(); VectorValues dX = values.zeroVectors();
const size_t cols = dX.dim(key); const size_t cols = dX.dim(key);
Matrix J = Matrix::Zero(rows, cols); Matrix J = Matrix::Zero(rows, cols);
for (size_t col = 0; col < cols; ++col) { for (size_t col = 0; col < cols; ++col) {
Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols); Eigen::VectorXd dx = Eigen::VectorXd::Zero(cols);
dx[col] = delta; dx(col) = delta;
dX[key] = dx; dX[key] = dx;
Values eval_values = values.retract(dX); Values eval_values = values.retract(dX);
const Eigen::VectorXd left = factor.whitenedError(eval_values); const Eigen::VectorXd left = factor.whitenedError(eval_values);
dx[col] = -delta; dx(col) = -delta;
dX[key] = dx; dX[key] = dx;
eval_values = values.retract(dX); eval_values = values.retract(dX);
const Eigen::VectorXd right = factor.whitenedError(eval_values); const Eigen::VectorXd right = factor.whitenedError(eval_values);
J.col(col) = (left - right) * one_over_2delta; J.col(col) = (left - right) * one_over_2delta;
} }
jacobians.push_back(std::make_pair(key,J)); jacobians.push_back(std::make_pair(key, J));
} }
// Next step...return JacobianFactor // Next step...return JacobianFactor
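A hedged sketch of how the helper above is typically used in a test (editorial; factor and values are assumed): the analytic linearization is compared against the central-difference Jacobian J.col(col) = (f(x + delta*e_col) - f(x - delta*e_col)) / (2*delta) built in the loop above.

#include <gtsam/nonlinear/factorTesting.h>
#include <gtsam/linear/JacobianFactor.h>
using namespace gtsam;

bool jacobiansAgree(const NoiseModelFactor& factor, const Values& values) {
  const JacobianFactor numeric = linearizeNumerically(factor, values);
  const auto analytic =
      boost::dynamic_pointer_cast<JacobianFactor>(factor.linearize(values));
  return analytic && numeric.equals(*analytic, 1e-5);
}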

View File

@ -115,6 +115,10 @@ class Ordering {
Ordering(); Ordering();
Ordering(const gtsam::Ordering& other); Ordering(const gtsam::Ordering& other);
template <FACTOR_GRAPH = {gtsam::NonlinearFactorGraph,
gtsam::GaussianFactorGraph}>
static gtsam::Ordering Colamd(const FACTOR_GRAPH& graph);
// Testable // Testable
void print(string s = "", const gtsam::KeyFormatter& keyFormatter = void print(string s = "", const gtsam::KeyFormatter& keyFormatter =
gtsam::DefaultKeyFormatter) const; gtsam::DefaultKeyFormatter) const;
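The C++ counterpart of the newly wrapped template, as a hedged sketch (editorial; assumes a nonlinear graph):

#include <gtsam/inference/Ordering.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
using namespace gtsam;

Ordering fillReducingOrdering(const NonlinearFactorGraph& graph) {
  return Ordering::Colamd(graph);  // COLAMD fill-reducing ordering, as wrapped above
}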

View File

@ -293,6 +293,19 @@ TEST(Expression, compose3) {
EXPECT(expected == R3.keys()); EXPECT(expected == R3.keys());
} }
/* ************************************************************************* */
// Test compose with double type (should be multiplication).
TEST(Expression, compose4) {
// Create expression
gtsam::Key key = 1;
Double_ R1(key), R2(key);
Double_ R3 = R1 * R2;
// Check keys
set<Key> expected = list_of(1);
EXPECT(expected == R3.keys());
}
/* ************************************************************************* */ /* ************************************************************************* */
// Test with ternary function. // Test with ternary function.
Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1, Rot3 composeThree(const Rot3& R1, const Rot3& R2, const Rot3& R3, OptionalJacobian<3, 3> H1,

View File

@ -35,37 +35,37 @@ using namespace gtsam::serializationTestHelpers;
/* ************************************************************************* */ /* ************************************************************************* */
// Create GUIDs for Noisemodels // Create GUIDs for Noisemodels
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic,"gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
/* ************************************************************************* */ /* ************************************************************************* */
// Create GUIDs for factors // Create GUIDs for factors
BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>"); BOOST_CLASS_EXPORT_GUID(gtsam::PriorFactor<gtsam::Pose3>, "gtsam::PriorFactor<gtsam::Pose3>")
BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor"); BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor"); BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional"); BOOST_CLASS_EXPORT_GUID(gtsam::GaussianConditional , "gtsam::GaussianConditional")
/* ************************************************************************* */ /* ************************************************************************* */
// Export all classes derived from Value // Export all classes derived from Value
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); GTSAM_VALUE_EXPORT(gtsam::Cal3_S2)
GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler); GTSAM_VALUE_EXPORT(gtsam::Cal3Bundler)
GTSAM_VALUE_EXPORT(gtsam::Point3); GTSAM_VALUE_EXPORT(gtsam::Point3)
GTSAM_VALUE_EXPORT(gtsam::Pose3); GTSAM_VALUE_EXPORT(gtsam::Pose3)
GTSAM_VALUE_EXPORT(gtsam::Rot3); GTSAM_VALUE_EXPORT(gtsam::Rot3)
GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>); GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3_S2>)
GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>); GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3DS2>)
GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>); GTSAM_VALUE_EXPORT(gtsam::PinholeCamera<Cal3Bundler>)
namespace detail { namespace detail {
template<class T> struct pack { template<class T> struct pack {

View File

@ -59,8 +59,8 @@ namespace gtsam {
template<class CAMERA, class LANDMARK> template<class CAMERA, class LANDMARK>
class GeneralSFMFactor: public NoiseModelFactor2<CAMERA, LANDMARK> { class GeneralSFMFactor: public NoiseModelFactor2<CAMERA, LANDMARK> {
GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA); GTSAM_CONCEPT_MANIFOLD_TYPE(CAMERA)
GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK); GTSAM_CONCEPT_MANIFOLD_TYPE(LANDMARK)
static const int DimC = FixedDimension<CAMERA>::value; static const int DimC = FixedDimension<CAMERA>::value;
static const int DimL = FixedDimension<LANDMARK>::value; static const int DimL = FixedDimension<LANDMARK>::value;
@ -202,7 +202,7 @@ struct traits<GeneralSFMFactor<CAMERA, LANDMARK> > : Testable<
template<class CALIBRATION> template<class CALIBRATION>
class GeneralSFMFactor2: public NoiseModelFactor3<Pose3, Point3, CALIBRATION> { class GeneralSFMFactor2: public NoiseModelFactor3<Pose3, Point3, CALIBRATION> {
GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION); GTSAM_CONCEPT_MANIFOLD_TYPE(CALIBRATION)
static const int DimK = FixedDimension<CALIBRATION>::value; static const int DimK = FixedDimension<CALIBRATION>::value;
protected: protected:

View File

@ -9,20 +9,21 @@
namespace gtsam { namespace gtsam {
/** /**
* JacobianFactor for Schur complement that uses the "Nullspace Trick" by Mourikis * JacobianFactor for Schur complement that uses the "Nullspace Trick" by
* Mourikis et al.
* *
* This trick is equivalent to the Schur complement, but can be faster. * This trick is equivalent to the Schur complement, but can be faster.
* In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are poses, * In essence, the linear factor |E*dp + F*dX - b|, where p is point and X are
* is multiplied by Enull, a matrix that spans the left nullspace of E, i.e., * poses, is multiplied by Enull, a matrix that spans the left nullspace of E,
* The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm * mx3 * 3x3) * i.e., The mx3 matrix is analyzed with SVD as E = [Erange Enull]*S*V (mxm *
* where Enull is an m x (m-3) matrix * mx3 * 3x3) where Enull is an m x (m-3) matrix Then Enull'*E*dp = 0, and
* Then Enull'*E*dp = 0, and
* |Enull'*E*dp + Enull'*F*dX - Enull'*b| == |Enull'*F*dX - Enull'*b| * |Enull'*E*dp + Enull'*F*dX - Enull'*b| == |Enull'*F*dX - Enull'*b|
* Normally F is m x 6*numKeys, and Enull'*F yields an (m-3) x 6*numKeys matrix. * Normally F is m x 6*numKeys, and Enull'*F yields an (m-3) x 6*numKeys matrix.
* *
* The code below assumes that F is block diagonal and is given as a vector of ZDim*D blocks. * The code below assumes that F is block diagonal and is given as a vector of
* Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for D=6) * ZDim*D blocks. Example: m = 4 (2 measurements), Enull = 4*1, F = 4*12 (for
* Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as a 1x2 * 2x6 mult * D=6) Then Enull'*F = 1*4 * 4*12 = 1*12, but each 1*6 piece can be computed as
* a 1x2 * 2x6 multiplication.
*/ */
template<size_t D, size_t ZDim> template<size_t D, size_t ZDim>
class JacobianFactorSVD: public RegularJacobianFactor<D> { class JacobianFactorSVD: public RegularJacobianFactor<D> {
@ -37,10 +38,10 @@ public:
JacobianFactorSVD() { JacobianFactorSVD() {
} }
/// Empty constructor with keys /// Empty constructor with keys.
JacobianFactorSVD(const KeyVector& keys, // JacobianFactorSVD(const KeyVector& keys,
const SharedDiagonal& model = SharedDiagonal()) : const SharedDiagonal& model = SharedDiagonal())
Base() { : Base() {
Matrix zeroMatrix = Matrix::Zero(0, D); Matrix zeroMatrix = Matrix::Zero(0, D);
Vector zeroVector = Vector::Zero(0); Vector zeroVector = Vector::Zero(0);
std::vector<KeyMatrix> QF; std::vector<KeyMatrix> QF;
@ -51,18 +52,21 @@ public:
} }
/** /**
* @brief Constructor * @brief Construct a new JacobianFactorSVD object, creating a reduced-rank
* Takes the CameraSet derivatives (as ZDim*D blocks of block-diagonal F) * Jacobian factor on the CameraSet.
* and a reduced point derivative, Enull
* and creates a reduced-rank Jacobian factor on the CameraSet
* *
* @Fblocks: * @param keys keys associated with F blocks.
* @param Fblocks CameraSet derivatives, ZDim*D blocks of block-diagonal F
* @param Enull a reduced point derivative
* @param b right-hand side
* @param model noise model
*/ */
JacobianFactorSVD(const KeyVector& keys, JacobianFactorSVD(
const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks, const Matrix& Enull, const KeyVector& keys,
const Vector& b, // const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& Fblocks,
const SharedDiagonal& model = SharedDiagonal()) : const Matrix& Enull, const Vector& b,
Base() { const SharedDiagonal& model = SharedDiagonal())
: Base() {
size_t numKeys = Enull.rows() / ZDim; size_t numKeys = Enull.rows() / ZDim;
size_t m2 = ZDim * numKeys - 3; // TODO: is this not just Enull.rows()? size_t m2 = ZDim * numKeys - 3; // TODO: is this not just Enull.rows()?
// PLAIN nullptr SPACE TRICK // PLAIN nullptr SPACE TRICK
@ -74,9 +78,8 @@ public:
QF.reserve(numKeys); QF.reserve(numKeys);
for (size_t k = 0; k < Fblocks.size(); ++k) { for (size_t k = 0; k < Fblocks.size(); ++k) {
Key key = keys[k]; Key key = keys[k];
QF.push_back( QF.emplace_back(
KeyMatrix(key, key, (Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]);
(Enull.transpose()).block(0, ZDim * k, m2, ZDim) * Fblocks[k]));
} }
JacobianFactor::fillTerms(QF, Enull.transpose() * b, model); JacobianFactor::fillTerms(QF, Enull.transpose() * b, model);
} }
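A hedged plain-Eigen sketch of the nullspace trick documented above (editorial; E is assumed m x 3 with full column rank): compute Enull spanning the left null space of E, so Enull'*E = 0 and the point term drops out of |E*dp + F*dX - b|.

#include <Eigen/Dense>

// Columns of the returned m x (m-n) matrix are orthogonal to range(E).
Eigen::MatrixXd leftNullSpace(const Eigen::MatrixXd& E) {
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(E, Eigen::ComputeFullU);
  const Eigen::Index m = E.rows(), n = E.cols();
  return svd.matrixU().rightCols(m - n);
}
// The reduced factor is then |Enull' * F * dX - Enull' * b|, as assembled in fillTerms above.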

View File

@ -11,7 +11,7 @@
/** /**
* @file ProjectionFactor.h * @file ProjectionFactor.h
* @brief Basic bearing factor from 2D measurement * @brief Reprojection of a LANDMARK to a 2D point.
* @author Chris Beall * @author Chris Beall
* @author Richard Roberts * @author Richard Roberts
* @author Frank Dellaert * @author Frank Dellaert
@ -22,17 +22,21 @@
#include <gtsam/nonlinear/NonlinearFactor.h> #include <gtsam/nonlinear/NonlinearFactor.h>
#include <gtsam/geometry/PinholeCamera.h> #include <gtsam/geometry/PinholeCamera.h>
#include <gtsam/geometry/Pose3.h>
#include <gtsam/geometry/Point3.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
#include <boost/optional.hpp> #include <boost/optional.hpp>
namespace gtsam { namespace gtsam {
/** /**
* Non-linear factor for a constraint derived from a 2D measurement. The calibration is known here. * Non-linear factor for a constraint derived from a 2D measurement.
* i.e. the main building block for visual SLAM. * The calibration is known here.
* The main building block for visual SLAM.
* @addtogroup SLAM * @addtogroup SLAM
*/ */
template<class POSE, class LANDMARK, class CALIBRATION = Cal3_S2> template <class POSE = Pose3, class LANDMARK = Point3,
class CALIBRATION = Cal3_S2>
class GenericProjectionFactor: public NoiseModelFactor2<POSE, LANDMARK> { class GenericProjectionFactor: public NoiseModelFactor2<POSE, LANDMARK> {
protected: protected:

68 gtsam/slam/ReadMe.md Normal file
View File

@ -0,0 +1,68 @@
# SLAM Factors
## GenericProjectionFactor (defined in ProjectionFactor.h)
Non-linear factor that minimizes the re-projection error with respect to a 2D measurement.
The calibration is assumed known and passed in the constructor.
The main building block for visual SLAM.
Templated on
- `POSE`, default `Pose3`
- `LANDMARK`, default `Point3`
- `CALIBRATION`, default `Cal3_S2`
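A minimal hedged usage sketch (editorial, not part of the file; keys, calibration and the pixel measurement are purely illustrative):

#include <gtsam/slam/ProjectionFactor.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/inference/Symbol.h>
#include <boost/make_shared.hpp>
using namespace gtsam;

void addProjectionFactor(NonlinearFactorGraph& graph) {
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto pixelNoise = noiseModel::Isotropic::Sigma(2, 1.0);
  const Point2 measurement(215.0, 180.0);  // hypothetical detection
  graph.emplace_shared<GenericProjectionFactor<> >(  // defaults: Pose3, Point3, Cal3_S2
      measurement, pixelNoise, Symbol('x', 1), Symbol('l', 1), K);
}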
## SmartFactors
These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras.
While one typically adds multiple GenericProjectionFactors (one for each observation of a landmark), a SmartFactor collects all measurements for a landmark, i.e., the factor graph contains 1 smart factor per landmark.
### SmartFactorBase
This is the base class for smart factors, templated on a `CAMERA` type.
It has no internal point, but it saves the measurements, keeps a noise model, and an optional sensor pose.
### SmartProjectionFactor
Also templated on `CAMERA`. Triangulates a 3D point and keeps an estimate of it around.
This factor operates with monocular cameras, and is used to optimize the camera pose
*and* calibration for each camera, and requires variables of type `CAMERA` in values.
If the calibration is fixed use `SmartProjectionPoseFactor` instead!
### SmartProjectionPoseFactor
Derives from `SmartProjectionFactor` but is templated on a `CALIBRATION` type, setting `CAMERA = PinholePose<CALIBRATION>`.
This factor assumes that the camera calibration is fixed and the same for all cameras involved in this factor.
The factor only constrains poses.
If the calibration should be optimized, as well, use `SmartProjectionFactor` instead!
### SmartProjectionRigFactor
Same as `SmartProjectionPoseFactor`, except:
- it is templated on `CAMERA`, i.e., it allows cameras beyond pinhole;
- it allows measurements from multiple cameras, each camera with fixed but potentially different intrinsics and extrinsics;
- it allows multiple observations from the same pose/key, again, to model a multi-camera system.
## Linearized Smart Factors
The factors below are less likely to be relevant to the user, but result from using the non-linear smart factors above.
### RegularImplicitSchurFactor
A specialization of a GaussianFactor to structure-less SFM, which is very fast in a conjugate gradient (CG) solver.
It is produced by calling `createRegularImplicitSchurFactor` in `SmartFactorBase` or `SmartProjectionFactor`.
### JacobianFactorQ
A RegularJacobianFactor that uses some badly documented reduction on the Jacobians.
### JacobianFactorQR
A RegularJacobianFactor that eliminates a point using sequential elimination.
### JacobianFactorSVD
A RegularJacobianFactor that uses the "Nullspace Trick" by Mourikis et al. See the documentation in the file, which *is* well documented.

View File

@ -1,6 +1,6 @@
/** /**
* @file RegularImplicitSchurFactor.h * @file RegularImplicitSchurFactor.h
* @brief A new type of linear factor (GaussianFactor), which is subclass of GaussianFactor * @brief A subclass of GaussianFactor specialized to structureless SFM.
* @author Frank Dellaert * @author Frank Dellaert
* @author Luca Carlone * @author Luca Carlone
*/ */
@ -20,6 +20,20 @@ namespace gtsam {
/** /**
* RegularImplicitSchurFactor * RegularImplicitSchurFactor
*
* A specialization of a GaussianFactor to structure-less SFM, which is very
* fast in a conjugate gradient (CG) solver. Specifically, as measured in
* timeSchurFactors.cpp, it stays very fast for an increasing number of cameras.
* The magic is in multiplyHessianAdd, which does the Hessian-vector multiply at
* the core of CG, and implements
* y += F'*alpha*(I - E*P*E')*F*x
* where
* - F is the 2mx6m Jacobian of the m 2D measurements wrpt m 6DOF poses
* - E is the 2mx3 Jacobian of the m 2D measurements wrpt a 3D point
* - P is the covariance on the point
* The equation above implicitly executes the Schur complement by removing the
* information E*P*E' from the Hessian. It is also very fast as we do not use
* the full 6m*6m F matrix, but rather only its m 6x6 diagonal blocks.
*/ */
template<class CAMERA> template<class CAMERA>
class RegularImplicitSchurFactor: public GaussianFactor { class RegularImplicitSchurFactor: public GaussianFactor {
@ -38,9 +52,10 @@ protected:
static const int ZDim = traits<Z>::dimension; ///< Measurement dimension static const int ZDim = traits<Z>::dimension; ///< Measurement dimension
typedef Eigen::Matrix<double, ZDim, D> MatrixZD; ///< type of an F block typedef Eigen::Matrix<double, ZDim, D> MatrixZD; ///< type of an F block
typedef Eigen::Matrix<double, D, D> MatrixDD; ///< camera hessian typedef Eigen::Matrix<double, D, D> MatrixDD; ///< camera Hessian
typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> > FBlocks;
const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> > FBlocks_; ///< All ZDim*D F blocks (one for each camera) FBlocks FBlocks_; ///< All ZDim*D F blocks (one for each camera)
const Matrix PointCovariance_; ///< the 3*3 matrix P = inv(E'E) (2*2 if degenerate) const Matrix PointCovariance_; ///< the 3*3 matrix P = inv(E'E) (2*2 if degenerate)
const Matrix E_; ///< The 2m*3 E Jacobian with respect to the point const Matrix E_; ///< The 2m*3 E Jacobian with respect to the point
const Vector b_; ///< 2m-dimensional RHS vector const Vector b_; ///< 2m-dimensional RHS vector
@ -52,17 +67,25 @@ public:
} }
/// Construct from blocks of F, E, inv(E'*E), and RHS vector b /// Construct from blocks of F, E, inv(E'*E), and RHS vector b
RegularImplicitSchurFactor(const KeyVector& keys,
const std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& FBlocks, const Matrix& E, const Matrix& P, /**
const Vector& b) : * @brief Construct a new RegularImplicitSchurFactor object.
GaussianFactor(keys), FBlocks_(FBlocks), PointCovariance_(P), E_(E), b_(b) { *
} * @param keys keys corresponding to cameras
* @param Fs All ZDim*D F blocks (one for each camera)
* @param E Jacobian of measurements wrpt point.
* @param P point covariance matrix
* @param b RHS vector
*/
RegularImplicitSchurFactor(const KeyVector& keys, const FBlocks& Fs,
const Matrix& E, const Matrix& P, const Vector& b)
: GaussianFactor(keys), FBlocks_(Fs), PointCovariance_(P), E_(E), b_(b) {}
/// Destructor /// Destructor
~RegularImplicitSchurFactor() override { ~RegularImplicitSchurFactor() override {
} }
std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD> >& FBlocks() const { const FBlocks& Fs() const {
return FBlocks_; return FBlocks_;
} }
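A hedged dense-Eigen sketch of the matrix-vector product documented above (editorial; the real factor never forms the full F, exploiting its block structure instead):

#include <Eigen/Dense>

void multiplyHessianAddDense(double alpha, const Eigen::MatrixXd& F,
                             const Eigen::MatrixXd& E, const Eigen::MatrixXd& P,
                             const Eigen::VectorXd& x, Eigen::VectorXd& y) {
  const Eigen::VectorXd Fx = F * x;  // 2m-vector of projected updates
  const Eigen::VectorXd projected = Fx - E * (P * (E.transpose() * Fx));
  y += alpha * F.transpose() * projected;  // y += F' * alpha * (I - E*P*E') * F * x
}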

View File

@ -37,12 +37,14 @@
namespace gtsam { namespace gtsam {
/** /**
* @brief Base class for smart factors * @brief Base class for smart factors.
* This base class has no internal point, but it has a measurement, noise model * This base class has no internal point, but it has a measurement, noise model
* and an optional sensor pose. * and an optional sensor pose.
* This class mainly computes the derivatives and returns them as a variety of factors. * This class mainly computes the derivatives and returns them as a variety of
* The methods take a Cameras argument, which should behave like PinholeCamera, and * factors. The methods take a CameraSet<CAMERA> argument and the value of a
* the value of a point, which is kept in the base class. * point, which is kept in the derived class.
*
* @tparam CAMERA should behave like a PinholeCamera.
*/ */
template<class CAMERA> template<class CAMERA>
class SmartFactorBase: public NonlinearFactor { class SmartFactorBase: public NonlinearFactor {
@ -64,19 +66,20 @@ protected:
/** /**
* As of Feb 22, 2015, the noise model is the same for all measurements and * As of Feb 22, 2015, the noise model is the same for all measurements and
* is isotropic. This allows for moving most calculations of Schur complement * is isotropic. This allows for moving most calculations of Schur complement
* etc to be moved to CameraSet very easily, and also agrees pragmatically * etc. to be easily moved to CameraSet, and also agrees pragmatically
* with what is normally done. * with what is normally done.
*/ */
SharedIsotropic noiseModel_; SharedIsotropic noiseModel_;
/** /**
* 2D measurement and noise model for each of the m views * Measurements for each of the m views.
* We keep a copy of measurements for I/O and computing the error. * We keep a copy of the measurements for I/O and computing the error.
* The order is kept the same as the keys that we use to create the factor. * The order is kept the same as the keys that we use to create the factor.
*/ */
ZVector measured_; ZVector measured_;
boost::optional<Pose3> body_P_sensor_; ///< Pose of the camera in the body frame boost::optional<Pose3>
body_P_sensor_; ///< Pose of the camera in the body frame
// Cache for Fblocks, to avoid a malloc every time we re-linearize // Cache for Fblocks, to avoid a malloc every time we re-linearize
mutable FBlocks Fs; mutable FBlocks Fs;
@ -84,16 +87,16 @@ protected:
public: public:
GTSAM_MAKE_ALIGNED_OPERATOR_NEW GTSAM_MAKE_ALIGNED_OPERATOR_NEW
/// shorthand for a smart pointer to a factor /// shorthand for a smart pointer to a factor.
typedef boost::shared_ptr<This> shared_ptr; typedef boost::shared_ptr<This> shared_ptr;
/// We use the new CameraSte data structure to refer to a set of cameras /// The CameraSet data structure is used to refer to a set of cameras.
typedef CameraSet<CAMERA> Cameras; typedef CameraSet<CAMERA> Cameras;
/// Default Constructor, for serialization /// Default Constructor, for serialization.
SmartFactorBase() {} SmartFactorBase() {}
/// Constructor /// Construct with given noise model and optional arguments.
SmartFactorBase(const SharedNoiseModel& sharedNoiseModel, SmartFactorBase(const SharedNoiseModel& sharedNoiseModel,
boost::optional<Pose3> body_P_sensor = boost::none, boost::optional<Pose3> body_P_sensor = boost::none,
size_t expectedNumberCameras = 10) size_t expectedNumberCameras = 10)
@ -111,12 +114,12 @@ protected:
noiseModel_ = sharedIsotropic; noiseModel_ = sharedIsotropic;
} }
/// Virtual destructor, subclasses from NonlinearFactor /// Virtual destructor, subclasses from NonlinearFactor.
~SmartFactorBase() override { ~SmartFactorBase() override {
} }
/** /**
* Add a new measurement and pose/camera key * Add a new measurement and pose/camera key.
* @param measured is the 2m dimensional projection of a single landmark * @param measured is the 2m dimensional projection of a single landmark
* @param key is the index corresponding to the camera observing the landmark * @param key is the index corresponding to the camera observing the landmark
*/ */
@ -129,9 +132,7 @@ protected:
this->keys_.push_back(key); this->keys_.push_back(key);
} }
/** /// Add a bunch of measurements, together with the camera keys.
* Add a bunch of measurements, together with the camera keys
*/
void add(const ZVector& measurements, const KeyVector& cameraKeys) { void add(const ZVector& measurements, const KeyVector& cameraKeys) {
assert(measurements.size() == cameraKeys.size()); assert(measurements.size() == cameraKeys.size());
for (size_t i = 0; i < measurements.size(); i++) { for (size_t i = 0; i < measurements.size(); i++) {
@ -140,8 +141,8 @@ protected:
} }
/** /**
* Adds an entire SfM_track (collection of cameras observing a single point). * Add an entire SfM_track (collection of cameras observing a single point).
* The noise is assumed to be the same for all measurements * The noise is assumed to be the same for all measurements.
*/ */
template<class SFM_TRACK> template<class SFM_TRACK>
void add(const SFM_TRACK& trackToAdd) { void add(const SFM_TRACK& trackToAdd) {
@ -151,17 +152,13 @@ protected:
} }
} }
/// get the dimension (number of rows!) of the factor /// Return the dimension (number of rows!) of the factor.
size_t dim() const override { size_t dim() const override { return ZDim * this->measured_.size(); }
return ZDim * this->measured_.size();
}
/** return the measurements */ /// Return the 2D measurements (ZDim, in general).
const ZVector& measured() const { const ZVector& measured() const { return measured_; }
return measured_;
}
/// Collect all cameras: important that in key order /// Collect all cameras: important that in key order.
virtual Cameras cameras(const Values& values) const { virtual Cameras cameras(const Values& values) const {
Cameras cameras; Cameras cameras;
for(const Key& k: this->keys_) for(const Key& k: this->keys_)
@ -188,25 +185,30 @@ protected:
/// equals /// equals
bool equals(const NonlinearFactor& p, double tol = 1e-9) const override { bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
const This *e = dynamic_cast<const This*>(&p); if (const This* e = dynamic_cast<const This*>(&p)) {
// Check that all measurements are the same.
bool areMeasurementsEqual = true;
for (size_t i = 0; i < measured_.size(); i++) { for (size_t i = 0; i < measured_.size(); i++) {
if (traits<Z>::Equals(this->measured_.at(i), e->measured_.at(i), tol) == false) if (!traits<Z>::Equals(this->measured_.at(i), e->measured_.at(i), tol))
areMeasurementsEqual = false; return false;
break; }
// If so, check base class.
return Base::equals(p, tol);
} else {
return false;
} }
return e && Base::equals(p, tol) && areMeasurementsEqual;
} }
/// Compute reprojection errors [h(x)-z] = [cameras.project(p)-z] and /// Compute reprojection errors [h(x)-z] = [cameras.project(p)-z] and
/// derivatives /// derivatives. This is the error before the noise model is applied.
template <class POINT> template <class POINT>
Vector unwhitenedError( Vector unwhitenedError(
const Cameras& cameras, const POINT& point, const Cameras& cameras, const POINT& point,
boost::optional<typename Cameras::FBlocks&> Fs = boost::none, // boost::optional<typename Cameras::FBlocks&> Fs = boost::none, //
boost::optional<Matrix&> E = boost::none) const { boost::optional<Matrix&> E = boost::none) const {
Vector ue = cameras.reprojectionError(point, measured_, Fs, E); // Reproject, with optional derivatives.
Vector error = cameras.reprojectionError(point, measured_, Fs, E);
// Apply chain rule if body_P_sensor_ is given.
if (body_P_sensor_ && Fs) { if (body_P_sensor_ && Fs) {
const Pose3 sensor_P_body = body_P_sensor_->inverse(); const Pose3 sensor_P_body = body_P_sensor_->inverse();
constexpr int camera_dim = traits<CAMERA>::dimension; constexpr int camera_dim = traits<CAMERA>::dimension;
@ -224,52 +226,60 @@ protected:
Fs->at(i) = Fs->at(i) * J; Fs->at(i) = Fs->at(i) * J;
} }
} }
correctForMissingMeasurements(cameras, ue, Fs, E);
return ue; // Correct the Jacobians in case some measurements are missing.
correctForMissingMeasurements(cameras, error, Fs, E);
return error;
} }
/** /**
* This corrects the Jacobians for the case in which some pixel measurement is missing (nan) * This corrects the Jacobians for the case in which some 2D measurement is
* In practice, this does not do anything in the monocular case, but it is implemented in the stereo version * missing (nan). In practice, this does not do anything in the monocular
* case, but it is implemented in the stereo version.
*/ */
virtual void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, boost::optional<typename Cameras::FBlocks&> Fs = boost::none, virtual void correctForMissingMeasurements(
const Cameras& cameras, Vector& ue,
boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
boost::optional<Matrix&> E = boost::none) const {} boost::optional<Matrix&> E = boost::none) const {}
/** /**
* Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) - z] * Calculate vector of re-projection errors [h(x)-z] = [cameras.project(p) -
* Noise model applied * z], with the noise model applied.
*/ */
template<class POINT> template<class POINT>
Vector whitenedError(const Cameras& cameras, const POINT& point) const { Vector whitenedError(const Cameras& cameras, const POINT& point) const {
Vector e = cameras.reprojectionError(point, measured_); Vector error = cameras.reprojectionError(point, measured_);
if (noiseModel_) if (noiseModel_)
noiseModel_->whitenInPlace(e); noiseModel_->whitenInPlace(error);
return e; return error;
} }
/** Calculate the error of the factor. /**
* This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian. * Calculate the error of the factor.
* In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of
* to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5. * Gaussian. In this class, we take the raw prediction error \f$ h(x)-z \f$,
* Will be used in "error(Values)" function required by NonlinearFactor base class * ask the noise model to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and
* then multiply by 0.5. Will be used in "error(Values)" function required by
* NonlinearFactor base class
*/ */
template<class POINT> template<class POINT>
double totalReprojectionError(const Cameras& cameras, double totalReprojectionError(const Cameras& cameras,
const POINT& point) const { const POINT& point) const {
Vector e = whitenedError(cameras, point); Vector error = whitenedError(cameras, point);
return 0.5 * e.dot(e); return 0.5 * error.dot(error);
} }
/// Computes Point Covariance P from E /// Computes Point Covariance P from the "point Jacobian" E.
static Matrix PointCov(Matrix& E) { static Matrix PointCov(const Matrix& E) {
return (E.transpose() * E).inverse(); return (E.transpose() * E).inverse();
} }
/** /**
* Compute F, E, and b (called below in both vanilla and SVD versions), where * Compute F, E, and b (called below in both vanilla and SVD versions), where
* F is a vector of derivatives wrpt the cameras, and E the stacked derivatives * F is a vector of derivatives wrpt the cameras, and E the stacked
* with respect to the point. The value of cameras/point are passed as parameters. * derivatives with respect to the point. The value of cameras/point are
* TODO: Kill this obsolete method * passed as parameters.
*/ */
template<class POINT> template<class POINT>
void computeJacobians(FBlocks& Fs, Matrix& E, Vector& b, void computeJacobians(FBlocks& Fs, Matrix& E, Vector& b,
@ -281,7 +291,11 @@ protected:
b = -unwhitenedError(cameras, point, Fs, E); b = -unwhitenedError(cameras, point, Fs, E);
} }
/// SVD version /**
* SVD version that produces smaller Jacobian matrices by doing an SVD
* decomposition on E, and returning the left null space of E.
* See JacobianFactorSVD for more documentation.
* */
template<class POINT> template<class POINT>
void computeJacobiansSVD(FBlocks& Fs, Matrix& Enull, void computeJacobiansSVD(FBlocks& Fs, Matrix& Enull,
Vector& b, const Cameras& cameras, const POINT& point) const { Vector& b, const Cameras& cameras, const POINT& point) const {
@ -291,14 +305,14 @@ protected:
static const int N = FixedDimension<POINT>::value; // 2 (Unit3) or 3 (Point3) static const int N = FixedDimension<POINT>::value; // 2 (Unit3) or 3 (Point3)
// Do SVD on A // Do SVD on A.
Eigen::JacobiSVD<Matrix> svd(E, Eigen::ComputeFullU); Eigen::JacobiSVD<Matrix> svd(E, Eigen::ComputeFullU);
Vector s = svd.singularValues();
size_t m = this->keys_.size(); size_t m = this->keys_.size();
Enull = svd.matrixU().block(0, N, ZDim * m, ZDim * m - N); // last ZDim*m-N columns Enull = svd.matrixU().block(0, N, ZDim * m, ZDim * m - N); // last ZDim*m-N columns
} }
/// Linearize to a Hessianfactor /// Linearize to a Hessianfactor.
// TODO(dellaert): Not used/tested anywhere and not properly whitened.
boost::shared_ptr<RegularHessianFactor<Dim> > createHessianFactor( boost::shared_ptr<RegularHessianFactor<Dim> > createHessianFactor(
const Cameras& cameras, const Point3& point, const double lambda = 0.0, const Cameras& cameras, const Point3& point, const double lambda = 0.0,
bool diagonalDamping = false) const { bool diagonalDamping = false) const {
@ -351,9 +365,7 @@ protected:
P, b); P, b);
} }
/** /// Return Jacobians as JacobianFactorQ.
* Return Jacobians as JacobianFactorQ
*/
boost::shared_ptr<JacobianFactorQ<Dim, ZDim> > createJacobianQFactor( boost::shared_ptr<JacobianFactorQ<Dim, ZDim> > createJacobianQFactor(
const Cameras& cameras, const Point3& point, double lambda = 0.0, const Cameras& cameras, const Point3& point, double lambda = 0.0,
bool diagonalDamping = false) const { bool diagonalDamping = false) const {
@ -368,8 +380,8 @@ protected:
} }
/** /**
* Return Jacobians as JacobianFactorSVD * Return Jacobians as JacobianFactorSVD.
* TODO lambda is currently ignored * TODO(dellaert): lambda is currently ignored
*/ */
boost::shared_ptr<JacobianFactor> createJacobianSVDFactor( boost::shared_ptr<JacobianFactor> createJacobianSVDFactor(
const Cameras& cameras, const Point3& point, double lambda = 0.0) const { const Cameras& cameras, const Point3& point, double lambda = 0.0) const {
@ -393,7 +405,7 @@ protected:
F.block<ZDim, Dim>(ZDim * i, Dim * i) = Fs.at(i); F.block<ZDim, Dim>(ZDim * i, Dim * i) = Fs.at(i);
} }
// Return sensor pose.
Pose3 body_P_sensor() const{ Pose3 body_P_sensor() const{
if(body_P_sensor_) if(body_P_sensor_)
return *body_P_sensor_; return *body_P_sensor_;

View File

@ -61,15 +61,17 @@ protected:
/// @name Caching triangulation /// @name Caching triangulation
/// @{ /// @{
mutable TriangulationResult result_; ///< result from triangulateSafe mutable TriangulationResult result_; ///< result from triangulateSafe
mutable std::vector<Pose3, Eigen::aligned_allocator<Pose3> > cameraPosesTriangulation_; ///< current triangulation poses mutable std::vector<Pose3, Eigen::aligned_allocator<Pose3> >
cameraPosesTriangulation_; ///< current triangulation poses
/// @} /// @}
public: public:
/// shorthand for a smart pointer to a factor /// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<This> shared_ptr; typedef boost::shared_ptr<This> shared_ptr;
/// shorthand for a set of cameras /// shorthand for a set of cameras
typedef CAMERA Camera;
typedef CameraSet<CAMERA> Cameras; typedef CameraSet<CAMERA> Cameras;
/** /**
@ -116,21 +118,31 @@ public:
&& Base::equals(p, tol); && Base::equals(p, tol);
} }
/// Check if the new linearization point is the same as the one used for previous triangulation /**
* @brief Check if the new linearization point is the same as the one used for
* previous triangulation.
*
* @param cameras
* @return true if we need to re-triangulate.
*/
bool decideIfTriangulate(const Cameras& cameras) const { bool decideIfTriangulate(const Cameras& cameras) const {
// several calls to linearize will be done from the same linearization point, hence it is not needed to re-triangulate // Several calls to linearize will be done from the same linearization
// Note that this is not yet "selecting linearization", that will come later, and we only check if the // point, hence it is not needed to re-triangulate. Note that this is not
// current linearization is the "same" (up to tolerance) w.r.t. the last time we triangulated the point // yet "selecting linearization", that will come later, and we only check if
// the current linearization is the "same" (up to tolerance) w.r.t. the last
// time we triangulated the point.
size_t m = cameras.size(); size_t m = cameras.size();
bool retriangulate = false; bool retriangulate = false;
// if we do not have a previous linearization point or the new linearization point includes more poses // Definitely true if we do not have a previous linearization point or the
// new linearization point includes more poses.
if (cameraPosesTriangulation_.empty() if (cameraPosesTriangulation_.empty()
|| cameras.size() != cameraPosesTriangulation_.size()) || cameras.size() != cameraPosesTriangulation_.size())
retriangulate = true; retriangulate = true;
// Otherwise, check poses against cache.
if (!retriangulate) { if (!retriangulate) {
for (size_t i = 0; i < cameras.size(); i++) { for (size_t i = 0; i < cameras.size(); i++) {
if (!cameras[i].pose().equals(cameraPosesTriangulation_[i], if (!cameras[i].pose().equals(cameraPosesTriangulation_[i],
@ -141,7 +153,8 @@ public:
} }
} }
if (retriangulate) { // we store the current poses used for triangulation // Store the current poses used for triangulation if we will re-triangulate.
if (retriangulate) {
cameraPosesTriangulation_.clear(); cameraPosesTriangulation_.clear();
cameraPosesTriangulation_.reserve(m); cameraPosesTriangulation_.reserve(m);
for (size_t i = 0; i < m; i++) for (size_t i = 0; i < m; i++)
@ -149,10 +162,15 @@ public:
cameraPosesTriangulation_.push_back(cameras[i].pose()); cameraPosesTriangulation_.push_back(cameras[i].pose());
} }
return retriangulate; // if we arrive to this point all poses are the same and we don't need re-triangulation return retriangulate;
} }
/// triangulateSafe /**
* @brief Call gtsam::triangulateSafe iff we need to re-triangulate.
*
* @param cameras
* @return TriangulationResult
*/
TriangulationResult triangulateSafe(const Cameras& cameras) const { TriangulationResult triangulateSafe(const Cameras& cameras) const {
size_t m = cameras.size(); size_t m = cameras.size();
@ -166,17 +184,21 @@ public:
return result_; return result_;
} }
/// triangulate /**
* @brief Possibly re-triangulate before calculating Jacobians.
*
* @param cameras
* @return true if we could safely triangulate
*/
bool triangulateForLinearize(const Cameras& cameras) const { bool triangulateForLinearize(const Cameras& cameras) const {
triangulateSafe(cameras); // imperative, might reset result_ triangulateSafe(cameras); // imperative, might reset result_
return bool(result_); return bool(result_);
} }
/// linearize returns a Hessianfactor that is an approximation of error(p) /// Create a Hessianfactor that is an approximation of error(p).
boost::shared_ptr<RegularHessianFactor<Base::Dim> > createHessianFactor( boost::shared_ptr<RegularHessianFactor<Base::Dim> > createHessianFactor(
const Cameras& cameras, const double lambda = 0.0, bool diagonalDamping = const Cameras& cameras, const double lambda = 0.0,
false) const { bool diagonalDamping = false) const {
size_t numKeys = this->keys_.size(); size_t numKeys = this->keys_.size();
// Create structures for Hessian Factors // Create structures for Hessian Factors
KeyVector js; KeyVector js;
@ -184,39 +206,38 @@ public:
std::vector<Vector> gs(numKeys); std::vector<Vector> gs(numKeys);
if (this->measured_.size() != cameras.size()) if (this->measured_.size() != cameras.size())
throw std::runtime_error("SmartProjectionHessianFactor: this->measured_" throw std::runtime_error(
"SmartProjectionHessianFactor: this->measured_"
".size() inconsistent with input"); ".size() inconsistent with input");
triangulateSafe(cameras); triangulateSafe(cameras);
if (params_.degeneracyMode == ZERO_ON_DEGENERACY && !result_) { if (params_.degeneracyMode == ZERO_ON_DEGENERACY && !result_) {
// failed: return "empty" Hessian // failed: return "empty" Hessian
for(Matrix& m: Gs) for (Matrix& m : Gs) m = Matrix::Zero(Base::Dim, Base::Dim);
m = Matrix::Zero(Base::Dim, Base::Dim); for (Vector& v : gs) v = Vector::Zero(Base::Dim);
for(Vector& v: gs)
v = Vector::Zero(Base::Dim);
return boost::make_shared<RegularHessianFactor<Base::Dim> >(this->keys_, return boost::make_shared<RegularHessianFactor<Base::Dim> >(this->keys_,
Gs, gs, 0.0); Gs, gs, 0.0);
} }
// Jacobian could be 3D Point3 OR 2D Unit3, difference is E.cols(). // Jacobian could be 3D Point3 OR 2D Unit3, difference is E.cols().
std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> > Fblocks; typename Base::FBlocks Fs;
Matrix E; Matrix E;
Vector b; Vector b;
computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras); computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);
// Whiten using noise model // Whiten using noise model
Base::whitenJacobians(Fblocks, E, b); Base::whitenJacobians(Fs, E, b);
// build augmented hessian // build augmented hessian
SymmetricBlockMatrix augmentedHessian = // SymmetricBlockMatrix augmentedHessian = //
Cameras::SchurComplement(Fblocks, E, b, lambda, diagonalDamping); Cameras::SchurComplement(Fs, E, b, lambda, diagonalDamping);
return boost::make_shared<RegularHessianFactor<Base::Dim> >(this->keys_, return boost::make_shared<RegularHessianFactor<Base::Dim> >(
augmentedHessian); this->keys_, augmentedHessian);
} }
// create factor // Create RegularImplicitSchurFactor factor.
boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > createRegularImplicitSchurFactor( boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > createRegularImplicitSchurFactor(
const Cameras& cameras, double lambda) const { const Cameras& cameras, double lambda) const {
if (triangulateForLinearize(cameras)) if (triangulateForLinearize(cameras))
@ -226,7 +247,7 @@ public:
return boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> >(); return boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> >();
} }
/// create factor /// Create JacobianFactorQ factor.
boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor( boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor(
const Cameras& cameras, double lambda) const { const Cameras& cameras, double lambda) const {
if (triangulateForLinearize(cameras)) if (triangulateForLinearize(cameras))
@ -236,13 +257,13 @@ public:
return boost::make_shared<JacobianFactorQ<Base::Dim, 2> >(this->keys_); return boost::make_shared<JacobianFactorQ<Base::Dim, 2> >(this->keys_);
} }
/// Create a factor, takes values /// Create JacobianFactorQ factor, takes values.
boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor( boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > createJacobianQFactor(
const Values& values, double lambda) const { const Values& values, double lambda) const {
return createJacobianQFactor(this->cameras(values), lambda); return createJacobianQFactor(this->cameras(values), lambda);
} }
/// different (faster) way to compute Jacobian factor /// Different (faster) way to compute a JacobianFactorSVD factor.
boost::shared_ptr<JacobianFactor> createJacobianSVDFactor( boost::shared_ptr<JacobianFactor> createJacobianSVDFactor(
const Cameras& cameras, double lambda) const { const Cameras& cameras, double lambda) const {
if (triangulateForLinearize(cameras)) if (triangulateForLinearize(cameras))
@ -252,19 +273,19 @@ public:
return boost::make_shared<JacobianFactorSVD<Base::Dim, 2> >(this->keys_); return boost::make_shared<JacobianFactorSVD<Base::Dim, 2> >(this->keys_);
} }
/// linearize to a Hessianfactor /// Linearize to a Hessianfactor.
virtual boost::shared_ptr<RegularHessianFactor<Base::Dim> > linearizeToHessian( virtual boost::shared_ptr<RegularHessianFactor<Base::Dim> > linearizeToHessian(
const Values& values, double lambda = 0.0) const { const Values& values, double lambda = 0.0) const {
return createHessianFactor(this->cameras(values), lambda); return createHessianFactor(this->cameras(values), lambda);
} }
/// linearize to an Implicit Schur factor /// Linearize to an Implicit Schur factor.
virtual boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > linearizeToImplicit( virtual boost::shared_ptr<RegularImplicitSchurFactor<CAMERA> > linearizeToImplicit(
const Values& values, double lambda = 0.0) const { const Values& values, double lambda = 0.0) const {
return createRegularImplicitSchurFactor(this->cameras(values), lambda); return createRegularImplicitSchurFactor(this->cameras(values), lambda);
} }
/// linearize to a JacobianfactorQ /// Linearize to a JacobianfactorQ.
virtual boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > linearizeToJacobian( virtual boost::shared_ptr<JacobianFactorQ<Base::Dim, 2> > linearizeToJacobian(
const Values& values, double lambda = 0.0) const { const Values& values, double lambda = 0.0) const {
return createJacobianQFactor(this->cameras(values), lambda); return createJacobianQFactor(this->cameras(values), lambda);
@ -334,7 +355,7 @@ public:
/// Assumes the point has been computed /// Assumes the point has been computed
/// Note E can be 2m*3 or 2m*2, in case point is degenerate /// Note E can be 2m*3 or 2m*2, in case point is degenerate
void computeJacobiansWithTriangulatedPoint( void computeJacobiansWithTriangulatedPoint(
std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& E, Vector& b, typename Base::FBlocks& Fs, Matrix& E, Vector& b,
const Cameras& cameras) const { const Cameras& cameras) const {
if (!result_) { if (!result_) {
@ -342,32 +363,32 @@ public:
// TODO check flag whether we should do this // TODO check flag whether we should do this
Unit3 backProjected = cameras[0].backprojectPointAtInfinity( Unit3 backProjected = cameras[0].backprojectPointAtInfinity(
this->measured_.at(0)); this->measured_.at(0));
Base::computeJacobians(Fblocks, E, b, cameras, backProjected); Base::computeJacobians(Fs, E, b, cameras, backProjected);
} else { } else {
// valid result: just return Base version // valid result: just return Base version
Base::computeJacobians(Fblocks, E, b, cameras, *result_); Base::computeJacobians(Fs, E, b, cameras, *result_);
} }
} }
/// Version that takes values, and creates the point /// Version that takes values, and creates the point
bool triangulateAndComputeJacobians( bool triangulateAndComputeJacobians(
std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& E, Vector& b, typename Base::FBlocks& Fs, Matrix& E, Vector& b,
const Values& values) const { const Values& values) const {
Cameras cameras = this->cameras(values); Cameras cameras = this->cameras(values);
bool nonDegenerate = triangulateForLinearize(cameras); bool nonDegenerate = triangulateForLinearize(cameras);
if (nonDegenerate) if (nonDegenerate)
computeJacobiansWithTriangulatedPoint(Fblocks, E, b, cameras); computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);
return nonDegenerate; return nonDegenerate;
} }
/// takes values /// takes values
bool triangulateAndComputeJacobiansSVD( bool triangulateAndComputeJacobiansSVD(
std::vector<typename Base::MatrixZD, Eigen::aligned_allocator<typename Base::MatrixZD> >& Fblocks, Matrix& Enull, Vector& b, typename Base::FBlocks& Fs, Matrix& Enull, Vector& b,
const Values& values) const { const Values& values) const {
Cameras cameras = this->cameras(values); Cameras cameras = this->cameras(values);
bool nonDegenerate = triangulateForLinearize(cameras); bool nonDegenerate = triangulateForLinearize(cameras);
if (nonDegenerate) if (nonDegenerate)
Base::computeJacobiansSVD(Fblocks, Enull, b, cameras, *result_); Base::computeJacobiansSVD(Fs, Enull, b, cameras, *result_);
return nonDegenerate; return nonDegenerate;
} }
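The methods above (createHessianFactor, createRegularImplicitSchurFactor, createJacobianQFactor, createJacobianSVDFactor and the linearizeTo* wrappers) are alternative linearization products of the same smart factor, selected through SmartProjectionParams. A minimal sketch of how a caller might pick one of them follows; it assumes the GTSAM 4.x smart-factor API used elsewhere in this diff, and all keys, calibration numbers, and measurements are made up.

// Sketch only: selects a linearization product via SmartProjectionParams.
// Numeric values and keys are hypothetical.
#include <gtsam/slam/SmartProjectionPoseFactor.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>
#include <boost/make_shared.hpp>

using namespace gtsam;

int main() {
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);  // 1-pixel measurement noise

  // JACOBIAN_SVD routes linearize() through createJacobianSVDFactor();
  // the default HESSIAN would use createHessianFactor() instead.
  SmartProjectionParams params(JACOBIAN_SVD, ZERO_ON_DEGENERACY);
  SmartProjectionPoseFactor<Cal3_S2> factor(noise, K, params);
  factor.add(Point2(320.0, 240.0), Symbol('x', 0));
  factor.add(Point2(300.0, 238.0), Symbol('x', 1));

  Values values;
  values.insert(Symbol('x', 0), Pose3());
  values.insert(Symbol('x', 1), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)));

  // Triangulates the point (if possible) and returns the requested Gaussian
  // factor type; degenerate geometry yields an "empty" factor on the keys.
  GaussianFactor::shared_ptr gf = factor.linearize(values);
  return gf ? 0 : 1;
}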

View File

@ -0,0 +1,370 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file SmartProjectionRigFactor.h
* @brief Smart factor on poses, assuming camera calibration is fixed.
* Same as SmartProjectionPoseFactor, except:
* - it is templated on CAMERA (i.e., it allows cameras beyond pinhole)
* - it admits a different calibration for each measurement (i.e., it
* can model a multi-camera rig system)
* - it allows multiple observations from the same pose/key (again, to
* model a multi-camera system)
* @author Luca Carlone
* @author Frank Dellaert
*/
#pragma once
#include <gtsam/slam/SmartProjectionFactor.h>
namespace gtsam {
/**
*
* @addtogroup SLAM
*
* If you are using the factor, please cite:
* L. Carlone, Z. Kira, C. Beall, V. Indelman, F. Dellaert, Eliminating
* conditionally independent sets in factor graphs: a unifying perspective based
* on smart factors, Int. Conf. on Robotics and Automation (ICRA), 2014.
*/
/**
* This factor assumes that camera calibration is fixed (but each measurement
* can be taken by a different camera in the rig, hence can have a different
* extrinsic and intrinsic calibration). The factor only constrains poses
* (variable dimension is 6 for each pose). This factor requires that values
* contains the involved poses (Pose3). If all measurements share the same
* calibration (i.e., are from the same camera), use SmartProjectionPoseFactor
 * instead! If the calibration should be optimized as well, use
* SmartProjectionFactor instead!
* @addtogroup SLAM
*/
template <class CAMERA>
class SmartProjectionRigFactor : public SmartProjectionFactor<CAMERA> {
private:
typedef SmartProjectionFactor<CAMERA> Base;
typedef SmartProjectionRigFactor<CAMERA> This;
typedef typename CAMERA::CalibrationType CALIBRATION;
static const int DimPose = 6; ///< Pose3 dimension
static const int ZDim = 2; ///< Measurement dimension
protected:
/// vector of keys (one for each observation) with potentially repeated keys
KeyVector nonUniqueKeys_;
/// cameras in the rig (fixed poses wrt body and intrinsics, for each camera)
boost::shared_ptr<typename Base::Cameras> cameraRig_;
/// vector of camera Ids (one for each observation, in the same order),
/// identifying which camera took the measurement
FastVector<size_t> cameraIds_;
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
typedef CAMERA Camera;
typedef CameraSet<CAMERA> Cameras;
/// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<This> shared_ptr;
/// Default constructor, only for serialization
SmartProjectionRigFactor() {}
/**
* Constructor
* @param sharedNoiseModel isotropic noise model for the 2D feature
* measurements
* @param cameraRig set of cameras (fixed poses wrt body and intrinsics) in
* the camera rig
* @param params parameters for the smart projection factors
*/
SmartProjectionRigFactor(
const SharedNoiseModel& sharedNoiseModel,
const boost::shared_ptr<Cameras>& cameraRig,
const SmartProjectionParams& params = SmartProjectionParams())
: Base(sharedNoiseModel, params), cameraRig_(cameraRig) {
// throw exception if configuration is not supported by this factor
if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"degeneracyMode must be set to ZERO_ON_DEGENERACY");
if (Base::params_.linearizationMode != gtsam::HESSIAN)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"linearizationMode must be set to HESSIAN");
}
/** Virtual destructor */
~SmartProjectionRigFactor() override = default;
/**
* add a new measurement, corresponding to an observation from pose "poseKey"
* and taken from the camera in the rig identified by "cameraId"
* @param measured 2-dimensional location of the projection of a
* single landmark in a single view (the measurement)
* @param poseKey key corresponding to the body pose of the camera taking the
* measurement
* @param cameraId ID of the camera in the rig taking the measurement (default
* 0)
*/
void add(const Point2& measured, const Key& poseKey,
const size_t& cameraId = 0) {
// store measurement and key
this->measured_.push_back(measured);
this->nonUniqueKeys_.push_back(poseKey);
// also store keys in the keys_ vector: these keys are assumed to be
// unique, so we avoid duplicates here
if (std::find(this->keys_.begin(), this->keys_.end(), poseKey) ==
this->keys_.end())
this->keys_.push_back(poseKey); // add only unique keys
// store id of the camera taking the measurement
cameraIds_.push_back(cameraId);
}
/**
* Variant of the previous "add" function in which we include multiple
* measurements
* @param measurements vector of the 2m dimensional location of the projection
* of a single landmark in the m views (the measurements)
* @param poseKeys keys corresponding to the body poses of the cameras taking
* the measurements
* @param cameraIds IDs of the cameras in the rig taking each measurement
* (same order as the measurements)
*/
void add(const Point2Vector& measurements, const KeyVector& poseKeys,
const FastVector<size_t>& cameraIds = FastVector<size_t>()) {
if (poseKeys.size() != measurements.size() ||
(poseKeys.size() != cameraIds.size() && cameraIds.size() != 0)) {
throw std::runtime_error(
"SmartProjectionRigFactor: "
"trying to add inconsistent inputs");
}
if (cameraIds.size() == 0 && cameraRig_->size() > 1) {
throw std::runtime_error(
"SmartProjectionRigFactor: "
"camera rig includes multiple camera "
"but add did not input cameraIds");
}
for (size_t i = 0; i < measurements.size(); i++) {
add(measurements[i], poseKeys[i],
cameraIds.size() == 0 ? 0 : cameraIds[i]);
}
}
  /// return (for each observation) the (possibly non-unique) keys involved in
  /// the measurements
  const KeyVector& nonUniqueKeys() const { return nonUniqueKeys_; }
  /// return the camera rig (fixed poses wrt body and intrinsics)
  const boost::shared_ptr<Cameras>& cameraRig() const { return cameraRig_; }
  /// return the vector of camera IDs (one per measurement)
  const FastVector<size_t>& cameraIds() const { return cameraIds_; }
/**
* print
* @param s optional string naming the factor
* @param keyFormatter optional formatter useful for printing Symbols
*/
void print(
const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << s << "SmartProjectionRigFactor: \n ";
for (size_t i = 0; i < nonUniqueKeys_.size(); i++) {
std::cout << "-- Measurement nr " << i << std::endl;
std::cout << "key: " << keyFormatter(nonUniqueKeys_[i]) << std::endl;
std::cout << "cameraId: " << cameraIds_[i] << std::endl;
(*cameraRig_)[cameraIds_[i]].print("camera in rig:\n");
}
Base::print("", keyFormatter);
}
/// equals
bool equals(const NonlinearFactor& p, double tol = 1e-9) const override {
const This* e = dynamic_cast<const This*>(&p);
return e && Base::equals(p, tol) && nonUniqueKeys_ == e->nonUniqueKeys() &&
cameraRig_->equals(*(e->cameraRig())) &&
std::equal(cameraIds_.begin(), cameraIds_.end(),
e->cameraIds().begin());
}
/**
* Collect all cameras involved in this factor
* @param values Values structure which must contain body poses corresponding
* to keys involved in this factor
* @return vector of cameras
*/
typename Base::Cameras cameras(const Values& values) const override {
typename Base::Cameras cameras;
cameras.reserve(nonUniqueKeys_.size()); // preallocate
for (size_t i = 0; i < nonUniqueKeys_.size(); i++) {
const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
const Pose3 world_P_sensor_i =
values.at<Pose3>(nonUniqueKeys_[i]) // = world_P_body
* camera_i.pose(); // = body_P_cam_i
cameras.emplace_back(world_P_sensor_i,
make_shared<typename CAMERA::CalibrationType>(
camera_i.calibration()));
}
return cameras;
}
/**
* error calculates the error of the factor.
*/
double error(const Values& values) const override {
if (this->active(values)) {
return this->totalReprojectionError(this->cameras(values));
} else { // else of active flag
return 0.0;
}
}
/**
* Compute jacobian F, E and error vector at a given linearization point
* @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor
   * @return Return arguments are the camera Jacobians Fs (expressed with
   * respect to the body pose behind each observation), the point Jacobian E,
   * and the error vector b. Note that the Jacobians are computed for a given
   * point.
*/
void computeJacobiansWithTriangulatedPoint(typename Base::FBlocks& Fs,
Matrix& E, Vector& b,
const Cameras& cameras) const {
    if (!this->result_) {
      // No valid triangulation result: cannot compute Jacobians for this point.
      throw std::runtime_error(
          "SmartProjectionRigFactor::computeJacobiansWithTriangulatedPoint: "
          "no valid triangulation result");
    } else {  // valid result: compute jacobians
b = -cameras.reprojectionError(*this->result_, this->measured_, Fs, E);
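      // Note: each Fs[i] returned by reprojectionError() is the Jacobian of the
      // reprojection error with respect to the world pose of sensor i. The loop
      // below right-multiplies it by H = d(world_P_body * body_P_sensor) /
      // d(world_P_body), obtained from compose(), so that Fs[i] becomes the
      // Jacobian with respect to the body pose stored in Values.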
for (size_t i = 0; i < Fs.size(); i++) {
const Pose3& body_P_sensor = (*cameraRig_)[cameraIds_[i]].pose();
const Pose3 world_P_body = cameras[i].pose() * body_P_sensor.inverse();
Eigen::Matrix<double, DimPose, DimPose> H;
world_P_body.compose(body_P_sensor, H);
Fs.at(i) = Fs.at(i) * H;
}
}
}
/// linearize and return a Hessianfactor that is an approximation of error(p)
boost::shared_ptr<RegularHessianFactor<DimPose> > createHessianFactor(
const Values& values, const double& lambda = 0.0,
bool diagonalDamping = false) const {
    // we may have multiple observations sharing the same keys (e.g., 2 cameras
// measuring from the same body pose), hence the number of unique keys may
// be smaller than nrMeasurements
    // Note: by construction, keys_ only contains unique keys.
    size_t nrUniqueKeys = this->keys_.size();
Cameras cameras = this->cameras(values);
// Create structures for Hessian Factors
std::vector<size_t> js;
std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
std::vector<Vector> gs(nrUniqueKeys);
if (this->measured_.size() != cameras.size()) // 1 observation per camera
throw std::runtime_error(
"SmartProjectionRigFactor: "
"measured_.size() inconsistent with input");
// triangulate 3D point at given linearization point
this->triangulateSafe(cameras);
if (!this->result_) { // failed: return "empty/zero" Hessian
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {
for (Matrix& m : Gs) m = Matrix::Zero(DimPose, DimPose);
for (Vector& v : gs) v = Vector::Zero(DimPose);
return boost::make_shared<RegularHessianFactor<DimPose> >(this->keys_,
Gs, gs, 0.0);
} else {
throw std::runtime_error(
"SmartProjectionRigFactor: "
"only supported degeneracy mode is ZERO_ON_DEGENERACY");
}
}
// compute Jacobian given triangulated 3D Point
typename Base::FBlocks Fs;
Matrix E;
Vector b;
this->computeJacobiansWithTriangulatedPoint(Fs, E, b, cameras);
// Whiten using noise model
this->noiseModel_->WhitenSystem(E, b);
for (size_t i = 0; i < Fs.size(); i++) {
Fs[i] = this->noiseModel_->Whiten(Fs[i]);
}
const Matrix3 P = Base::Cameras::PointCov(E, lambda, diagonalDamping);
    // Build the augmented Hessian (with the last row/column being the
    // information vector). Note: we need the augmented Hessian wrt the unique
    // keys in keys_, so the Schur complement over the per-measurement blocks
    // (indexed by nonUniqueKeys_) is rearranged onto the unique keys below.
SymmetricBlockMatrix augmentedHessianUniqueKeys =
Base::Cameras::template SchurComplementAndRearrangeBlocks<3, 6, 6>(
Fs, E, P, b, nonUniqueKeys_, this->keys_);
return boost::make_shared<RegularHessianFactor<DimPose> >(
this->keys_, augmentedHessianUniqueKeys);
}
/**
* Linearize to Gaussian Factor (possibly adding a damping factor Lambda for
* LM)
   * @param values Values structure which must contain the body poses
   * corresponding to the keys involved in this factor
* @return a Gaussian factor
*/
boost::shared_ptr<GaussianFactor> linearizeDamped(
const Values& values, const double& lambda = 0.0) const {
// depending on flag set on construction we may linearize to different
// linear factors
switch (this->params_.linearizationMode) {
case HESSIAN:
return this->createHessianFactor(values, lambda);
default:
throw std::runtime_error(
"SmartProjectionRigFactor: unknown linearization mode");
}
}
/// linearize
boost::shared_ptr<GaussianFactor> linearize(
const Values& values) const override {
return this->linearizeDamped(values);
}
private:
/// Serialization function
friend class boost::serialization::access;
template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
// ar& BOOST_SERIALIZATION_NVP(nonUniqueKeys_);
// ar& BOOST_SERIALIZATION_NVP(cameraRig_);
// ar& BOOST_SERIALIZATION_NVP(cameraIds_);
}
};
// end of class declaration
/// traits
template <class CAMERA>
struct traits<SmartProjectionRigFactor<CAMERA> >
: public Testable<SmartProjectionRigFactor<CAMERA> > {};
} // namespace gtsam
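Since SmartProjectionRigFactor.h is new in this commit, a short usage sketch may be helpful. It relies only on the API declared above; the rig geometry, calibration numbers, measurements, and keys are hypothetical.

// Sketch only: a two-camera rig observing one landmark from two body poses.
#include <gtsam/slam/SmartProjectionRigFactor.h>
#include <gtsam/geometry/PinholePose.h>
#include <gtsam/geometry/CameraSet.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/inference/Symbol.h>
#include <gtsam/nonlinear/Values.h>
#include <boost/make_shared.hpp>

using namespace gtsam;

int main() {
  using Camera = PinholePose<Cal3_S2>;
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);

  // Two cameras rigidly mounted on the body: one at the body origin, one
  // displaced 10 cm to the right (these are body_P_cam poses).
  auto rig = boost::make_shared<CameraSet<Camera> >();
  rig->push_back(Camera(Pose3(), K));
  rig->push_back(Camera(Pose3(Rot3(), Point3(0.1, 0.0, 0.0)), K));

  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);
  // HESSIAN + ZERO_ON_DEGENERACY is required by the constructor checks above.
  SmartProjectionParams params(HESSIAN, ZERO_ON_DEGENERACY);
  SmartProjectionRigFactor<Camera> factor(noise, rig, params);

  // Two body poses, each observed by both cameras in the rig (cameraId 0/1).
  factor.add(Point2(300.0, 240.0), Symbol('x', 0), 0);
  factor.add(Point2(320.0, 240.0), Symbol('x', 0), 1);
  factor.add(Point2(310.0, 242.0), Symbol('x', 1), 0);
  factor.add(Point2(330.0, 242.0), Symbol('x', 1), 1);

  Values values;
  values.insert(Symbol('x', 0), Pose3());
  values.insert(Symbol('x', 1), Pose3(Rot3(), Point3(0.0, -0.1, 0.0)));

  // Triangulates the landmark and returns a RegularHessianFactor on the two
  // unique pose keys (or a zero Hessian if the geometry is degenerate).
  auto gf = factor.linearize(values);
  return gf ? 0 : 1;
}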

View File

@ -22,6 +22,7 @@
#include <gtsam/slam/GeneralSFMFactor.h> #include <gtsam/slam/GeneralSFMFactor.h>
#include <gtsam/geometry/Cal3_S2.h> #include <gtsam/geometry/Cal3_S2.h>
#include <gtsam/geometry/Cal3Bundler.h> #include <gtsam/geometry/Cal3Bundler.h>
#include "../SmartProjectionRigFactor.h"
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
@ -68,7 +69,9 @@ SmartProjectionParams params;
// default Cal3_S2 poses // default Cal3_S2 poses
namespace vanillaPose { namespace vanillaPose {
typedef PinholePose<Cal3_S2> Camera; typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor; typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h)); static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h));
Camera level_camera(level_pose, sharedK); Camera level_camera(level_pose, sharedK);
Camera level_camera_right(pose_right, sharedK); Camera level_camera_right(pose_right, sharedK);
@ -81,7 +84,9 @@ Camera cam3(pose_above, sharedK);
// default Cal3_S2 poses // default Cal3_S2 poses
namespace vanillaPose2 { namespace vanillaPose2 {
typedef PinholePose<Cal3_S2> Camera; typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor; typedef SmartProjectionPoseFactor<Cal3_S2> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static Cal3_S2::shared_ptr sharedK2(new Cal3_S2(1500, 1200, 0, 640, 480)); static Cal3_S2::shared_ptr sharedK2(new Cal3_S2(1500, 1200, 0, 640, 480));
Camera level_camera(level_pose, sharedK2); Camera level_camera(level_pose, sharedK2);
Camera level_camera_right(pose_right, sharedK2); Camera level_camera_right(pose_right, sharedK2);
@ -94,6 +99,7 @@ Camera cam3(pose_above, sharedK2);
// Cal3Bundler cameras // Cal3Bundler cameras
namespace bundler { namespace bundler {
typedef PinholeCamera<Cal3Bundler> Camera; typedef PinholeCamera<Cal3Bundler> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionFactor<Camera> SmartFactor; typedef SmartProjectionFactor<Camera> SmartFactor;
static Cal3Bundler K(500, 1e-3, 1e-3, 0, 0); static Cal3Bundler K(500, 1e-3, 1e-3, 0, 0);
Camera level_camera(level_pose, K); Camera level_camera(level_pose, K);
@ -110,7 +116,9 @@ typedef GeneralSFMFactor<Camera, Point3> SFMFactor;
// Cal3Bundler poses // Cal3Bundler poses
namespace bundlerPose { namespace bundlerPose {
typedef PinholePose<Cal3Bundler> Camera; typedef PinholePose<Cal3Bundler> Camera;
typedef CameraSet<Camera> Cameras;
typedef SmartProjectionPoseFactor<Cal3Bundler> SmartFactor; typedef SmartProjectionPoseFactor<Cal3Bundler> SmartFactor;
typedef SmartProjectionRigFactor<Camera> SmartRigFactor;
static boost::shared_ptr<Cal3Bundler> sharedBundlerK( static boost::shared_ptr<Cal3Bundler> sharedBundlerK(
new Cal3Bundler(500, 1e-3, 1e-3, 1000, 2000)); new Cal3Bundler(500, 1e-3, 1e-3, 1000, 2000));
Camera level_camera(level_pose, sharedBundlerK); Camera level_camera(level_pose, sharedBundlerK);

View File

@ -127,13 +127,13 @@ TEST(SmartFactorBase, Stereo) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
TEST(SmartFactorBase, serialize) { TEST(SmartFactorBase, serialize) {
using namespace gtsam::serializationTestHelpers; using namespace gtsam::serializationTestHelpers;

View File

@ -811,13 +811,13 @@ TEST(SmartProjectionFactor, implicitJacobianFactor ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
TEST(SmartProjectionFactor, serialize) { TEST(SmartProjectionFactor, serialize) {
using namespace vanilla; using namespace vanilla;

View File

@ -1333,13 +1333,13 @@ TEST( SmartProjectionPoseFactor, Cal3BundlerRotationOnly ) {
} }
/* ************************************************************************* */ /* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
TEST(SmartProjectionPoseFactor, serialize) { TEST(SmartProjectionPoseFactor, serialize) {
using namespace vanillaPose; using namespace vanillaPose;
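The three hunks above only drop the trailing semicolons after BOOST_CLASS_EXPORT_GUID, presumably because the macro already expands to complete declarations and the extra ';' at namespace scope can trigger pedantic warnings. For context, a hedged sketch of how these exports are exercised by the serialization helpers used in these tests; the factor construction is hypothetical.

// Sketch only: round-trips a smart factor through text, XML, and binary
// archives using gtsam::serializationTestHelpers.
#include <gtsam/base/serializationTestHelpers.h>
#include <gtsam/slam/SmartProjectionPoseFactor.h>
#include <gtsam/geometry/Cal3_S2.h>
#include <boost/make_shared.hpp>

BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")

bool roundTripSketch() {
  using namespace gtsam;
  using namespace gtsam::serializationTestHelpers;
  auto K = boost::make_shared<Cal3_S2>(500.0, 500.0, 0.0, 320.0, 240.0);
  auto noise = noiseModel::Isotropic::Sigma(2, 1.0);
  SmartProjectionPoseFactor<Cal3_S2> factor(noise, K);
  // Each helper serializes, deserializes, and compares with equals().
  return equalsObj(factor) && equalsXML(factor) && equalsBinary(factor);
}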

File diff suppressed because it is too large

View File

@ -5,61 +5,60 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/AllDiff.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam_unstable/discrete/AllDiff.h>
#include <gtsam_unstable/discrete/Domain.h>
#include <boost/make_shared.hpp> #include <boost/make_shared.hpp>
namespace gtsam { namespace gtsam {
/* ************************************************************************* */ /* ************************************************************************* */
AllDiff::AllDiff(const DiscreteKeys& dkeys) : AllDiff::AllDiff(const DiscreteKeys& dkeys) : Constraint(dkeys.indices()) {
Constraint(dkeys.indices()) { for (const DiscreteKey& dkey : dkeys) cardinalities_.insert(dkey);
for(const DiscreteKey& dkey: dkeys) }
cardinalities_.insert(dkey);
}
/* ************************************************************************* */ /* ************************************************************************* */
void AllDiff::print(const std::string& s, void AllDiff::print(const std::string& s, const KeyFormatter& formatter) const {
const KeyFormatter& formatter) const {
std::cout << s << "AllDiff on "; std::cout << s << "AllDiff on ";
for (Key dkey: keys_) for (Key dkey : keys_) std::cout << formatter(dkey) << " ";
std::cout << formatter(dkey) << " ";
std::cout << std::endl; std::cout << std::endl;
} }
/* ************************************************************************* */ /* ************************************************************************* */
double AllDiff::operator()(const Values& values) const { double AllDiff::operator()(const Values& values) const {
std::set < size_t > taken; // record values taken by keys std::set<size_t> taken; // record values taken by keys
for(Key dkey: keys_) { for (Key dkey : keys_) {
size_t value = values.at(dkey); // get the value for that key size_t value = values.at(dkey); // get the value for that key
if (taken.count(value)) return 0.0;// check if value already taken if (taken.count(value)) return 0.0;  // check if value already taken
taken.insert(value);// if not, record it as taken and keep checking taken.insert(value); // if not, record it as taken and keep checking
} }
return 1.0; return 1.0;
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor AllDiff::toDecisionTreeFactor() const { DecisionTreeFactor AllDiff::toDecisionTreeFactor() const {
// We will do this by converting the AllDiff into many BinaryAllDiff constraints // We will do this by converting the AllDiff into many BinaryAllDiff
// constraints
DecisionTreeFactor converted; DecisionTreeFactor converted;
size_t nrKeys = keys_.size(); size_t nrKeys = keys_.size();
for (size_t i1 = 0; i1 < nrKeys; i1++) for (size_t i1 = 0; i1 < nrKeys; i1++)
for (size_t i2 = i1 + 1; i2 < nrKeys; i2++) { for (size_t i2 = i1 + 1; i2 < nrKeys; i2++) {
BinaryAllDiff binary12(discreteKey(i1),discreteKey(i2)); BinaryAllDiff binary12(discreteKey(i1), discreteKey(i2));
converted = converted * binary12.toDecisionTreeFactor(); converted = converted * binary12.toDecisionTreeFactor();
} }
return converted; return converted;
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const { DecisionTreeFactor AllDiff::operator*(const DecisionTreeFactor& f) const {
// TODO: can we do this more efficiently? // TODO: can we do this more efficiently?
return toDecisionTreeFactor() * f; return toDecisionTreeFactor() * f;
} }
/* ************************************************************************* */ /* ************************************************************************* */
bool AllDiff::ensureArcConsistency(size_t j, std::vector<Domain>& domains) const { bool AllDiff::ensureArcConsistency(size_t j,
std::vector<Domain>& domains) const {
// Though strictly not part of allDiff, we check for // Though strictly not part of allDiff, we check for
// a value in domains[j] that does not occur in any other connected domain. // a value in domains[j] that does not occur in any other connected domain.
// If found, we make this a singleton... // If found, we make this a singleton...
@ -70,7 +69,7 @@ namespace gtsam {
// Check all other domains for singletons and erase corresponding values // Check all other domains for singletons and erase corresponding values
// This is the same as arc-consistency on the equivalent binary constraints // This is the same as arc-consistency on the equivalent binary constraints
bool changed = false; bool changed = false;
for(Key k: keys_) for (Key k : keys_)
if (k != j) { if (k != j) {
const Domain& Dk = domains[k]; const Domain& Dk = domains[k];
if (Dk.isSingleton()) { // check if singleton if (Dk.isSingleton()) { // check if singleton
@ -82,30 +81,29 @@ namespace gtsam {
} }
} }
return changed; return changed;
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const { Constraint::shared_ptr AllDiff::partiallyApply(const Values& values) const {
DiscreteKeys newKeys; DiscreteKeys newKeys;
// loop over keys and add them only if they do not appear in values // loop over keys and add them only if they do not appear in values
for(Key k: keys_) for (Key k : keys_)
if (values.find(k) == values.end()) { if (values.find(k) == values.end()) {
newKeys.push_back(DiscreteKey(k,cardinalities_.at(k))); newKeys.push_back(DiscreteKey(k, cardinalities_.at(k)));
} }
return boost::make_shared<AllDiff>(newKeys); return boost::make_shared<AllDiff>(newKeys);
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr AllDiff::partiallyApply( Constraint::shared_ptr AllDiff::partiallyApply(
const std::vector<Domain>& domains) const { const std::vector<Domain>& domains) const {
DiscreteFactor::Values known; DiscreteFactor::Values known;
for(Key k: keys_) { for (Key k : keys_) {
const Domain& Dk = domains[k]; const Domain& Dk = domains[k];
if (Dk.isSingleton()) if (Dk.isSingleton()) known[k] = Dk.firstValue();
known[k] = Dk.firstValue();
} }
return partiallyApply(known); return partiallyApply(known);
} }
/* ************************************************************************* */ /* ************************************************************************* */
} // namespace gtsam } // namespace gtsam
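AllDiff::operator() above returns 1.0 exactly when all connected variables take distinct values. A minimal sketch of that behaviour, using only classes visible in this diff; keys and cardinalities are made up.

// Sketch only: an AllDiff over three ternary variables scores 1.0 exactly
// when all assigned values are distinct.
#include <gtsam_unstable/discrete/AllDiff.h>

using namespace gtsam;

int main() {
  DiscreteKey A(0, 3), B(1, 3), C(2, 3);
  DiscreteKeys dkeys;
  dkeys.push_back(A);
  dkeys.push_back(B);
  dkeys.push_back(C);
  AllDiff allDiff(dkeys);

  DiscreteFactor::Values assignment;
  assignment[0] = 0;
  assignment[1] = 1;
  assignment[2] = 1;                       // B and C collide -> 0.0
  const double bad = allDiff(assignment);

  assignment[2] = 2;                       // all distinct -> 1.0
  const double good = allDiff(assignment);

  return (bad == 0.0 && good == 1.0) ? 0 : 1;
}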

View File

@ -7,44 +7,42 @@
#pragma once #pragma once
#include <gtsam_unstable/discrete/BinaryAllDiff.h>
#include <gtsam/discrete/DiscreteKey.h> #include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/BinaryAllDiff.h>
namespace gtsam { namespace gtsam {
/** /**
* General AllDiff constraint * General AllDiff constraint
* Returns 1 if values for all keys are different, 0 otherwise * Returns 1 if values for all keys are different, 0 otherwise
* DiscreteFactors are all awkward in that they have to store two types of keys: * DiscreteFactors are all awkward in that they have to store two types of keys:
* for each variable we have a Key and an Key. In this factor, we * for each variable we have a Key and an Key. In this factor, we
* keep the Indices locally, and the Indices are stored in IndexFactor. * keep the Indices locally, and the Indices are stored in IndexFactor.
*/ */
class GTSAM_UNSTABLE_EXPORT AllDiff: public Constraint { class GTSAM_UNSTABLE_EXPORT AllDiff : public Constraint {
std::map<Key, size_t> cardinalities_;
std::map<Key,size_t> cardinalities_;
DiscreteKey discreteKey(size_t i) const { DiscreteKey discreteKey(size_t i) const {
Key j = keys_[i]; Key j = keys_[i];
return DiscreteKey(j,cardinalities_.at(j)); return DiscreteKey(j, cardinalities_.at(j));
} }
public: public:
/// Constructor /// Constructor
AllDiff(const DiscreteKeys& dkeys); AllDiff(const DiscreteKeys& dkeys);
// print // print
void print(const std::string& s = "", void print(const std::string& s = "", const KeyFormatter& formatter =
const KeyFormatter& formatter = DefaultKeyFormatter) const override; DefaultKeyFormatter) const override;
/// equals /// equals
bool equals(const DiscreteFactor& other, double tol) const override { bool equals(const DiscreteFactor& other, double tol) const override {
if(!dynamic_cast<const AllDiff*>(&other)) if (!dynamic_cast<const AllDiff*>(&other))
return false; return false;
else { else {
const AllDiff& f(static_cast<const AllDiff&>(other)); const AllDiff& f(static_cast<const AllDiff&>(other));
return cardinalities_.size() == f.cardinalities_.size() return cardinalities_.size() == f.cardinalities_.size() &&
&& std::equal(cardinalities_.begin(), cardinalities_.end(), std::equal(cardinalities_.begin(), cardinalities_.end(),
f.cardinalities_.begin()); f.cardinalities_.begin());
} }
} }
@ -65,13 +63,15 @@ namespace gtsam {
* @param j domain to be checked * @param j domain to be checked
* @param domains all other domains * @param domains all other domains
*/ */
bool ensureArcConsistency(size_t j, std::vector<Domain>& domains) const override; bool ensureArcConsistency(size_t j,
std::vector<Domain>& domains) const override;
/// Partially apply known values /// Partially apply known values
Constraint::shared_ptr partiallyApply(const Values&) const override; Constraint::shared_ptr partiallyApply(const Values&) const override;
/// Partially apply known values, domain version /// Partially apply known values, domain version
Constraint::shared_ptr partiallyApply(const std::vector<Domain>&) const override; Constraint::shared_ptr partiallyApply(
}; const std::vector<Domain>&) const override;
};
} // namespace gtsam } // namespace gtsam

View File

@ -7,33 +7,32 @@
#pragma once #pragma once
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/Constraint.h>
#include <gtsam/discrete/DecisionTreeFactor.h> #include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam_unstable/discrete/Constraint.h>
#include <gtsam_unstable/discrete/Domain.h>
namespace gtsam { namespace gtsam {
/** /**
* Binary AllDiff constraint * Binary AllDiff constraint
* Returns 1 if values for two keys are different, 0 otherwise * Returns 1 if values for two keys are different, 0 otherwise
* DiscreteFactors are all awkward in that they have to store two types of keys: * DiscreteFactors are all awkward in that they have to store two types of keys:
* for each variable we have a Index and an Index. In this factor, we * for each variable we have a Index and an Index. In this factor, we
* keep the Indices locally, and the Indices are stored in IndexFactor. * keep the Indices locally, and the Indices are stored in IndexFactor.
*/ */
class BinaryAllDiff: public Constraint { class BinaryAllDiff : public Constraint {
size_t cardinality0_, cardinality1_; /// cardinality size_t cardinality0_, cardinality1_; /// cardinality
public: public:
/// Constructor /// Constructor
BinaryAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) : BinaryAllDiff(const DiscreteKey& key1, const DiscreteKey& key2)
Constraint(key1.first, key2.first), : Constraint(key1.first, key2.first),
cardinality0_(key1.second), cardinality1_(key2.second) { cardinality0_(key1.second),
} cardinality1_(key2.second) {}
// print // print
void print(const std::string& s = "", void print(
const std::string& s = "",
const KeyFormatter& formatter = DefaultKeyFormatter) const override { const KeyFormatter& formatter = DefaultKeyFormatter) const override {
std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and " std::cout << s << "BinaryAllDiff on " << formatter(keys_[0]) << " and "
<< formatter(keys_[1]) << std::endl; << formatter(keys_[1]) << std::endl;
@ -41,28 +40,28 @@ namespace gtsam {
/// equals /// equals
bool equals(const DiscreteFactor& other, double tol) const override { bool equals(const DiscreteFactor& other, double tol) const override {
if(!dynamic_cast<const BinaryAllDiff*>(&other)) if (!dynamic_cast<const BinaryAllDiff*>(&other))
return false; return false;
else { else {
const BinaryAllDiff& f(static_cast<const BinaryAllDiff&>(other)); const BinaryAllDiff& f(static_cast<const BinaryAllDiff&>(other));
return (cardinality0_==f.cardinality0_) && (cardinality1_==f.cardinality1_); return (cardinality0_ == f.cardinality0_) &&
(cardinality1_ == f.cardinality1_);
} }
} }
/// Calculate value /// Calculate value
double operator()(const Values& values) const override { double operator()(const Values& values) const override {
return (double) (values.at(keys_[0]) != values.at(keys_[1])); return (double)(values.at(keys_[0]) != values.at(keys_[1]));
} }
/// Convert into a decisiontree /// Convert into a decisiontree
DecisionTreeFactor toDecisionTreeFactor() const override { DecisionTreeFactor toDecisionTreeFactor() const override {
DiscreteKeys keys; DiscreteKeys keys;
keys.push_back(DiscreteKey(keys_[0],cardinality0_)); keys.push_back(DiscreteKey(keys_[0], cardinality0_));
keys.push_back(DiscreteKey(keys_[1],cardinality1_)); keys.push_back(DiscreteKey(keys_[1], cardinality1_));
std::vector<double> table; std::vector<double> table;
for (size_t i1 = 0; i1 < cardinality0_; i1++) for (size_t i1 = 0; i1 < cardinality0_; i1++)
for (size_t i2 = 0; i2 < cardinality1_; i2++) for (size_t i2 = 0; i2 < cardinality1_; i2++) table.push_back(i1 != i2);
table.push_back(i1 != i2);
DecisionTreeFactor converted(keys, table); DecisionTreeFactor converted(keys, table);
return converted; return converted;
} }
@ -78,10 +77,10 @@ namespace gtsam {
* @param j domain to be checked * @param j domain to be checked
* @param domains all other domains * @param domains all other domains
*/ */
/// bool ensureArcConsistency(size_t j,
bool ensureArcConsistency(size_t j, std::vector<Domain>& domains) const override { std::vector<Domain>& domains) const override {
// throw std::runtime_error( // throw std::runtime_error(
// "BinaryAllDiff::ensureArcConsistency not implemented"); // "BinaryAllDiff::ensureArcConsistency not implemented");
return false; return false;
} }
@ -95,6 +94,6 @@ namespace gtsam {
const std::vector<Domain>&) const override { const std::vector<Domain>&) const override {
throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented"); throw std::runtime_error("BinaryAllDiff::partiallyApply not implemented");
} }
}; };
} // namespace gtsam } // namespace gtsam
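BinaryAllDiff::toDecisionTreeFactor above enumerates all (i1, i2) pairs and stores i1 != i2, so for two binary variables the table is 0,1,1,0. A small sketch with hypothetical keys:

// Sketch only: converts a BinaryAllDiff on two binary variables into a
// DecisionTreeFactor and evaluates it on the four possible assignments.
#include <gtsam_unstable/discrete/BinaryAllDiff.h>
#include <iostream>

using namespace gtsam;

int main() {
  DiscreteKey A(0, 2), B(1, 2);
  BinaryAllDiff constraint(A, B);
  DecisionTreeFactor table = constraint.toDecisionTreeFactor();

  DiscreteFactor::Values assignment;
  for (size_t a = 0; a < 2; ++a)
    for (size_t b = 0; b < 2; ++b) {
      assignment[0] = a;
      assignment[1] = b;
      // Prints 0 on the diagonal (a == b) and 1 off the diagonal.
      std::cout << a << "," << b << " -> " << table(assignment) << std::endl;
    }
  return 0;
}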

View File

@ -5,29 +5,30 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/CSP.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam_unstable/discrete/CSP.h>
#include <gtsam_unstable/discrete/Domain.h>
using namespace std; using namespace std;
namespace gtsam { namespace gtsam {
/// Find the best total assignment - can be expensive /// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment() const { CSP::sharedValues CSP::optimalAssignment() const {
DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(); DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential();
sharedValues mpe = chordal->optimize(); sharedValues mpe = chordal->optimize();
return mpe; return mpe;
} }
/// Find the best total assignment - can be expensive /// Find the best total assignment - can be expensive
CSP::sharedValues CSP::optimalAssignment(const Ordering& ordering) const { CSP::sharedValues CSP::optimalAssignment(const Ordering& ordering) const {
DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering); DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(ordering);
sharedValues mpe = chordal->optimize(); sharedValues mpe = chordal->optimize();
return mpe; return mpe;
} }
void CSP::runArcConsistency(size_t cardinality, size_t nrIterations, bool print) const { void CSP::runArcConsistency(size_t cardinality, size_t nrIterations,
bool print) const {
// Create VariableIndex // Create VariableIndex
VariableIndex index(*this); VariableIndex index(*this);
// index.print(); // index.print();
@ -35,9 +36,9 @@ namespace gtsam {
size_t n = index.size(); size_t n = index.size();
// Initialize domains // Initialize domains
std::vector < Domain > domains; std::vector<Domain> domains;
for (size_t j = 0; j < n; j++) for (size_t j = 0; j < n; j++)
domains.push_back(Domain(DiscreteKey(j,cardinality))); domains.push_back(Domain(DiscreteKey(j, cardinality)));
// Create array of flags indicating a domain changed or not // Create array of flags indicating a domain changed or not
std::vector<bool> changed(n); std::vector<bool> changed(n);
@ -51,13 +52,16 @@ namespace gtsam {
changed[v] = false; changed[v] = false;
// loop over all factors/constraints for variable v // loop over all factors/constraints for variable v
const FactorIndices& factors = index[v]; const FactorIndices& factors = index[v];
for(size_t f: factors) { for (size_t f : factors) {
// if not already a singleton // if not already a singleton
if (!domains[v].isSingleton()) { if (!domains[v].isSingleton()) {
// get the constraint and call its ensureArcConsistency method // get the constraint and call its ensureArcConsistency method
Constraint::shared_ptr constraint = boost::dynamic_pointer_cast<Constraint>((*this)[f]); Constraint::shared_ptr constraint =
if (!constraint) throw runtime_error("CSP:runArcConsistency: non-constraint factor"); boost::dynamic_pointer_cast<Constraint>((*this)[f]);
changed[v] = constraint->ensureArcConsistency(v,domains) || changed[v]; if (!constraint)
throw runtime_error("CSP:runArcConsistency: non-constraint factor");
changed[v] =
constraint->ensureArcConsistency(v, domains) || changed[v];
} }
} // f } // f
if (changed[v]) anyChange = true; if (changed[v]) anyChange = true;
@ -91,13 +95,14 @@ namespace gtsam {
// TODO: create a new ordering as we go, to ensure a connected graph // TODO: create a new ordering as we go, to ensure a connected graph
// KeyOrdering ordering; // KeyOrdering ordering;
// vector<Index> dkeys; // vector<Index> dkeys;
for(const DiscreteFactor::shared_ptr& f: factors_) { for (const DiscreteFactor::shared_ptr& f : factors_) {
Constraint::shared_ptr constraint = boost::dynamic_pointer_cast<Constraint>(f); Constraint::shared_ptr constraint =
if (!constraint) throw runtime_error("CSP:runArcConsistency: non-constraint factor"); boost::dynamic_pointer_cast<Constraint>(f);
if (!constraint)
throw runtime_error("CSP:runArcConsistency: non-constraint factor");
Constraint::shared_ptr reduced = constraint->partiallyApply(domains); Constraint::shared_ptr reduced = constraint->partiallyApply(domains);
if (print) reduced->print(); if (print) reduced->print();
} }
#endif #endif
} }
} // gtsam } // namespace gtsam
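runArcConsistency above repeatedly asks each constraint to prune the per-variable Domains until no domain changes, then (optionally) the CSP can be solved by elimination. A hedged sketch on a tiny 3-coloring problem, using only the CSP methods visible in this diff; keys, cardinality, and the pinned value are made up.

// Sketch only: 3-coloring of a triangle with one vertex pinned to color 0.
#include <gtsam_unstable/discrete/CSP.h>

using namespace gtsam;

int main() {
  const size_t nrColors = 3;
  DiscreteKey A(0, nrColors), B(1, nrColors), C(2, nrColors);

  CSP csp;
  csp.addAllDiff(A, B);      // pairwise "different color" constraints
  csp.addAllDiff(B, C);
  csp.addAllDiff(A, C);
  csp.addSingleValue(A, 0);  // pin vertex A to color 0

  // Propagate arc consistency (print=true shows the evolving domains),
  // then solve for the best total assignment by elimination.
  csp.runArcConsistency(nrColors, 10, true);
  CSP::sharedValues solution = csp.optimalAssignment();
  solution->print("solution: ");
  return 0;
}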

View File

@ -7,30 +7,28 @@
#pragma once #pragma once
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam_unstable/discrete/AllDiff.h> #include <gtsam_unstable/discrete/AllDiff.h>
#include <gtsam_unstable/discrete/SingleValue.h> #include <gtsam_unstable/discrete/SingleValue.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
namespace gtsam { namespace gtsam {
/** /**
* Constraint Satisfaction Problem class * Constraint Satisfaction Problem class
* A specialization of a DiscreteFactorGraph. * A specialization of a DiscreteFactorGraph.
* It knows about CSP-specific constraints and algorithms * It knows about CSP-specific constraints and algorithms
*/ */
class GTSAM_UNSTABLE_EXPORT CSP: public DiscreteFactorGraph { class GTSAM_UNSTABLE_EXPORT CSP : public DiscreteFactorGraph {
public: public:
/** A map from keys to values */ /** A map from keys to values */
typedef KeyVector Indices; typedef KeyVector Indices;
typedef Assignment<Key> Values; typedef Assignment<Key> Values;
typedef boost::shared_ptr<Values> sharedValues; typedef boost::shared_ptr<Values> sharedValues;
public: public:
// /// Constructor
// /// Constructor // CSP() {
// CSP() { // }
// }
/// Add a unary constraint, allowing only a single value /// Add a unary constraint, allowing only a single value
void addSingleValue(const DiscreteKey& dkey, size_t value) { void addSingleValue(const DiscreteKey& dkey, size_t value) {
@ -40,8 +38,7 @@ namespace gtsam {
/// Add a binary AllDiff constraint /// Add a binary AllDiff constraint
void addAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) { void addAllDiff(const DiscreteKey& key1, const DiscreteKey& key2) {
boost::shared_ptr<BinaryAllDiff> factor( boost::shared_ptr<BinaryAllDiff> factor(new BinaryAllDiff(key1, key2));
new BinaryAllDiff(key1, key2));
push_back(factor); push_back(factor);
} }
@ -51,13 +48,13 @@ namespace gtsam {
push_back(factor); push_back(factor);
} }
// /** return product of all factors as a single factor */ // /** return product of all factors as a single factor */
// DecisionTreeFactor product() const { // DecisionTreeFactor product() const {
// DecisionTreeFactor result; // DecisionTreeFactor result;
// for(const sharedFactor& factor: *this) // for(const sharedFactor& factor: *this)
// if (factor) result = (*factor) * result; // if (factor) result = (*factor) * result;
// return result; // return result;
// } // }
/// Find the best total assignment - can be expensive /// Find the best total assignment - can be expensive
sharedValues optimalAssignment() const; sharedValues optimalAssignment() const;
@ -65,16 +62,17 @@ namespace gtsam {
/// Find the best total assignment - can be expensive /// Find the best total assignment - can be expensive
sharedValues optimalAssignment(const Ordering& ordering) const; sharedValues optimalAssignment(const Ordering& ordering) const;
// /* // /*
// * Perform loopy belief propagation // * Perform loopy belief propagation
// * True belief propagation would check for each value in domain // * True belief propagation would check for each value in domain
// * whether any satisfying separator assignment can be found. // * whether any satisfying separator assignment can be found.
// * This corresponds to hyper-arc consistency in CSP speak. // * This corresponds to hyper-arc consistency in CSP speak.
// * This can be done by creating a mini-factor graph and search. // * This can be done by creating a mini-factor graph and search.
// * For a nine-by-nine Sudoku, the search tree will be 8+6+6=20 levels deep. // * For a nine-by-nine Sudoku, the search tree will be 8+6+6=20 levels
// * It will be very expensive to exclude values that way. // deep.
// */ // * It will be very expensive to exclude values that way.
// void applyBeliefPropagation(size_t nrIterations = 10) const; // */
// void applyBeliefPropagation(size_t nrIterations = 10) const;
/* /*
* Apply arc-consistency ~ Approximate loopy belief propagation * Apply arc-consistency ~ Approximate loopy belief propagation
@ -84,7 +82,6 @@ namespace gtsam {
*/ */
void runArcConsistency(size_t cardinality, size_t nrIterations = 10, void runArcConsistency(size_t cardinality, size_t nrIterations = 10,
bool print = false) const; bool print = false) const;
}; // CSP }; // CSP
} // gtsam
} // namespace gtsam

View File

@ -17,49 +17,40 @@
#pragma once #pragma once
#include <gtsam_unstable/dllexport.h>
#include <gtsam/discrete/DiscreteFactor.h> #include <gtsam/discrete/DiscreteFactor.h>
#include <gtsam_unstable/dllexport.h>
#include <boost/assign.hpp> #include <boost/assign.hpp>
namespace gtsam { namespace gtsam {
class Domain; class Domain;
/** /**
* Base class for discrete probabilistic factors * Base class for discrete probabilistic factors
* The most general one is the derived DecisionTreeFactor * The most general one is the derived DecisionTreeFactor
*/ */
class Constraint : public DiscreteFactor { class Constraint : public DiscreteFactor {
public: public:
typedef boost::shared_ptr<Constraint> shared_ptr; typedef boost::shared_ptr<Constraint> shared_ptr;
protected: protected:
/// Construct n-way factor /// Construct n-way factor
Constraint(const KeyVector& js) : Constraint(const KeyVector& js) : DiscreteFactor(js) {}
DiscreteFactor(js) {
}
/// Construct unary factor /// Construct unary factor
Constraint(Key j) : Constraint(Key j) : DiscreteFactor(boost::assign::cref_list_of<1>(j)) {}
DiscreteFactor(boost::assign::cref_list_of<1>(j)) {
}
/// Construct binary factor /// Construct binary factor
Constraint(Key j1, Key j2) : Constraint(Key j1, Key j2)
DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) { : DiscreteFactor(boost::assign::cref_list_of<2>(j1)(j2)) {}
}
/// construct from container /// construct from container
template<class KeyIterator> template <class KeyIterator>
Constraint(KeyIterator beginKey, KeyIterator endKey) : Constraint(KeyIterator beginKey, KeyIterator endKey)
DiscreteFactor(beginKey, endKey) { : DiscreteFactor(beginKey, endKey) {}
}
public: public:
/// @name Standard Constructors /// @name Standard Constructors
/// @{ /// @{
@ -78,16 +69,16 @@ namespace gtsam {
* @param j domain to be checked * @param j domain to be checked
* @param domains all other domains * @param domains all other domains
*/ */
virtual bool ensureArcConsistency(size_t j, std::vector<Domain>& domains) const = 0; virtual bool ensureArcConsistency(size_t j,
std::vector<Domain>& domains) const = 0;
/// Partially apply known values /// Partially apply known values
virtual shared_ptr partiallyApply(const Values&) const = 0; virtual shared_ptr partiallyApply(const Values&) const = 0;
/// Partially apply known values, domain version /// Partially apply known values, domain version
virtual shared_ptr partiallyApply(const std::vector<Domain>&) const = 0; virtual shared_ptr partiallyApply(const std::vector<Domain>&) const = 0;
/// @} /// @}
}; };
// DiscreteFactor // DiscreteFactor
}// namespace gtsam } // namespace gtsam

View File

@ -5,92 +5,89 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam_unstable/discrete/Domain.h>
#include <boost/make_shared.hpp> #include <boost/make_shared.hpp>
namespace gtsam { namespace gtsam {
using namespace std; using namespace std;
/* ************************************************************************* */ /* ************************************************************************* */
void Domain::print(const string& s, void Domain::print(const string& s, const KeyFormatter& formatter) const {
const KeyFormatter& formatter) const { // cout << s << ": Domain on " << formatter(keys_[0]) << " (j=" <<
// cout << s << ": Domain on " << formatter(keys_[0]) << " (j=" << // formatter(keys_[0]) << ") with values";
// formatter(keys_[0]) << ") with values"; // for (size_t v: values_) cout << " " << v;
// for (size_t v: values_) cout << " " << v; // cout << endl;
// cout << endl; for (size_t v : values_) cout << v;
for (size_t v: values_) cout << v; }
}
/* ************************************************************************* */ /* ************************************************************************* */
double Domain::operator()(const Values& values) const { double Domain::operator()(const Values& values) const {
return contains(values.at(keys_[0])); return contains(values.at(keys_[0]));
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor Domain::toDecisionTreeFactor() const { DecisionTreeFactor Domain::toDecisionTreeFactor() const {
DiscreteKeys keys; DiscreteKeys keys;
keys += DiscreteKey(keys_[0],cardinality_); keys += DiscreteKey(keys_[0], cardinality_);
vector<double> table; vector<double> table;
for (size_t i1 = 0; i1 < cardinality_; ++i1) for (size_t i1 = 0; i1 < cardinality_; ++i1) table.push_back(contains(i1));
table.push_back(contains(i1));
DecisionTreeFactor converted(keys, table); DecisionTreeFactor converted(keys, table);
return converted; return converted;
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor Domain::operator*(const DecisionTreeFactor& f) const { DecisionTreeFactor Domain::operator*(const DecisionTreeFactor& f) const {
// TODO: can we do this more efficiently? // TODO: can we do this more efficiently?
return toDecisionTreeFactor() * f; return toDecisionTreeFactor() * f;
} }
/* ************************************************************************* */ /* ************************************************************************* */
bool Domain::ensureArcConsistency(size_t j, vector<Domain>& domains) const { bool Domain::ensureArcConsistency(size_t j, vector<Domain>& domains) const {
if (j != keys_[0]) throw invalid_argument("Domain check on wrong domain"); if (j != keys_[0]) throw invalid_argument("Domain check on wrong domain");
Domain& D = domains[j]; Domain& D = domains[j];
for(size_t value: values_) for (size_t value : values_)
if (!D.contains(value)) throw runtime_error("Unsatisfiable"); if (!D.contains(value)) throw runtime_error("Unsatisfiable");
D = *this; D = *this;
return true; return true;
} }
/* ************************************************************************* */ /* ************************************************************************* */
bool Domain::checkAllDiff(const KeyVector keys, vector<Domain>& domains) { bool Domain::checkAllDiff(const KeyVector keys, vector<Domain>& domains) {
Key j = keys_[0]; Key j = keys_[0];
// for all values in this domain // for all values in this domain
for(size_t value: values_) { for (size_t value : values_) {
// for all connected domains // for all connected domains
for(Key k: keys) for (Key k : keys)
// if any domain contains the value we cannot make this domain singleton // if any domain contains the value we cannot make this domain singleton
if (k!=j && domains[k].contains(value)) if (k != j && domains[k].contains(value)) goto found;
goto found;
values_.clear(); values_.clear();
values_.insert(value); values_.insert(value);
return true; // we changed it return true; // we changed it
found:; found:;
} }
return false; // we did not change it return false; // we did not change it
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr Domain::partiallyApply( Constraint::shared_ptr Domain::partiallyApply(const Values& values) const {
const Values& values) const {
Values::const_iterator it = values.find(keys_[0]); Values::const_iterator it = values.find(keys_[0]);
if (it != values.end() && !contains(it->second)) throw runtime_error( if (it != values.end() && !contains(it->second))
"Domain::partiallyApply: unsatisfiable"); throw runtime_error("Domain::partiallyApply: unsatisfiable");
return boost::make_shared < Domain > (*this); return boost::make_shared<Domain>(*this);
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr Domain::partiallyApply( Constraint::shared_ptr Domain::partiallyApply(
const vector<Domain>& domains) const { const vector<Domain>& domains) const {
const Domain& Dk = domains[keys_[0]]; const Domain& Dk = domains[keys_[0]];
if (Dk.isSingleton() && !contains(*Dk.begin())) throw runtime_error( if (Dk.isSingleton() && !contains(*Dk.begin()))
"Domain::partiallyApply: unsatisfiable"); throw runtime_error("Domain::partiallyApply: unsatisfiable");
return boost::make_shared < Domain > (Dk); return boost::make_shared<Domain>(Dk);
} }
/* ************************************************************************* */ /* ************************************************************************* */
} // namespace gtsam } // namespace gtsam

View File

@ -7,81 +7,65 @@
#pragma once #pragma once
#include <gtsam_unstable/discrete/Constraint.h>
#include <gtsam/discrete/DiscreteKey.h> #include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/Constraint.h>
namespace gtsam { namespace gtsam {
/** /**
* Domain restriction constraint * Domain restriction constraint
*/ */
class GTSAM_UNSTABLE_EXPORT Domain: public Constraint { class GTSAM_UNSTABLE_EXPORT Domain : public Constraint {
size_t cardinality_; /// Cardinality size_t cardinality_; /// Cardinality
std::set<size_t> values_; /// allowed values std::set<size_t> values_; /// allowed values
public: public:
typedef boost::shared_ptr<Domain> shared_ptr; typedef boost::shared_ptr<Domain> shared_ptr;
// Constructor on Discrete Key initializes an "all-allowed" domain // Constructor on Discrete Key initializes an "all-allowed" domain
Domain(const DiscreteKey& dkey) : Domain(const DiscreteKey& dkey)
Constraint(dkey.first), cardinality_(dkey.second) { : Constraint(dkey.first), cardinality_(dkey.second) {
for (size_t v = 0; v < cardinality_; v++) for (size_t v = 0; v < cardinality_; v++) values_.insert(v);
values_.insert(v);
} }
// Constructor on Discrete Key with single allowed value // Constructor on Discrete Key with single allowed value
// Consider SingleValue constraint // Consider SingleValue constraint
Domain(const DiscreteKey& dkey, size_t v) : Domain(const DiscreteKey& dkey, size_t v)
Constraint(dkey.first), cardinality_(dkey.second) { : Constraint(dkey.first), cardinality_(dkey.second) {
values_.insert(v); values_.insert(v);
} }
/// Constructor /// Constructor
Domain(const Domain& other) : Domain(const Domain& other)
Constraint(other.keys_[0]), values_(other.values_) { : Constraint(other.keys_[0]), values_(other.values_) {}
}
/// insert a value, non const :-( /// insert a value, non const :-(
void insert(size_t value) { void insert(size_t value) { values_.insert(value); }
values_.insert(value);
}
/// erase a value, non const :-( /// erase a value, non const :-(
void erase(size_t value) { void erase(size_t value) { values_.erase(value); }
values_.erase(value);
}
size_t nrValues() const { size_t nrValues() const { return values_.size(); }
return values_.size();
}
bool isSingleton() const { bool isSingleton() const { return nrValues() == 1; }
return nrValues() == 1;
}
size_t firstValue() const { size_t firstValue() const { return *values_.begin(); }
return *values_.begin();
}
// print // print
void print(const std::string& s = "", void print(const std::string& s = "", const KeyFormatter& formatter =
const KeyFormatter& formatter = DefaultKeyFormatter) const override; DefaultKeyFormatter) const override;
/// equals /// equals
bool equals(const DiscreteFactor& other, double tol) const override { bool equals(const DiscreteFactor& other, double tol) const override {
if(!dynamic_cast<const Domain*>(&other)) if (!dynamic_cast<const Domain*>(&other))
return false; return false;
else { else {
const Domain& f(static_cast<const Domain&>(other)); const Domain& f(static_cast<const Domain&>(other));
return (cardinality_==f.cardinality_) && (values_==f.values_); return (cardinality_ == f.cardinality_) && (values_ == f.values_);
} }
} }
bool contains(size_t value) const { bool contains(size_t value) const { return values_.count(value) > 0; }
return values_.count(value)>0;
}
/// Calculate value /// Calculate value
double operator()(const Values& values) const override; double operator()(const Values& values) const override;
@ -97,11 +81,13 @@ namespace gtsam {
* @param j domain to be checked * @param j domain to be checked
* @param domains all other domains * @param domains all other domains
*/ */
bool ensureArcConsistency(size_t j, std::vector<Domain>& domains) const override; bool ensureArcConsistency(size_t j,
std::vector<Domain>& domains) const override;
/** /**
* Check for a value in domain that does not occur in any other connected domain. * Check for a value in domain that does not occur in any other connected
* If found, we make this a singleton... Called in AllDiff::ensureArcConsistency * domain. If found, we make this a singleton... Called in
* AllDiff::ensureArcConsistency
* @param keys connected domains through alldiff * @param keys connected domains through alldiff
*/ */
bool checkAllDiff(const KeyVector keys, std::vector<Domain>& domains); bool checkAllDiff(const KeyVector keys, std::vector<Domain>& domains);
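To illustrate the rule described above, here is a hypothetical sketch (the keys, cardinalities, and the wrapper function name are made up; only Domain and checkAllDiff come from this header):

#include <gtsam_unstable/discrete/Domain.h>
#include <vector>
using namespace gtsam;

void checkAllDiffExample() {
  // Three connected keys, each initially allowing values {0, 1, 2}.
  DiscreteKey A(0, 3), B(1, 3), C(2, 3);
  std::vector<Domain> domains{Domain(A), Domain(B), Domain(C)};
  domains[1].erase(2);  // key 1 can no longer take value 2
  domains[2].erase(2);  // key 2 can no longer take value 2
  KeyVector connected;
  connected.push_back(0);
  connected.push_back(1);
  connected.push_back(2);
  // Value 2 now occurs only in domain 0, so checkAllDiff shrinks
  // domain 0 to the singleton {2} and reports a change.
  bool changed = domains[0].checkAllDiff(connected, domains);  // true
  (void)changed;
}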
@ -112,6 +98,6 @@ namespace gtsam {
/// Partially apply known values, domain version /// Partially apply known values, domain version
Constraint::shared_ptr partiallyApply( Constraint::shared_ptr partiallyApply(
const std::vector<Domain>& domains) const override; const std::vector<Domain>& domains) const override;
}; };
} // namespace gtsam } // namespace gtsam


@ -5,24 +5,22 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/Scheduler.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam/base/debug.h> #include <gtsam/base/debug.h>
#include <gtsam/base/timing.h> #include <gtsam/base/timing.h>
#include <gtsam/discrete/DiscreteFactorGraph.h>
#include <gtsam_unstable/discrete/Scheduler.h>
#include <boost/tokenizer.hpp> #include <boost/tokenizer.hpp>
#include <cmath>
#include <fstream> #include <fstream>
#include <iomanip> #include <iomanip>
#include <cmath>
namespace gtsam { namespace gtsam {
using namespace std; using namespace std;
Scheduler::Scheduler(size_t maxNrStudents, const string& filename): Scheduler::Scheduler(size_t maxNrStudents, const string& filename)
maxNrStudents_(maxNrStudents) : maxNrStudents_(maxNrStudents) {
{
typedef boost::tokenizer<boost::escaped_list_separator<char> > Tokenizer; typedef boost::tokenizer<boost::escaped_list_separator<char> > Tokenizer;
// open file // open file
@ -38,8 +36,7 @@ namespace gtsam {
if (getline(is, line, '\r')) { if (getline(is, line, '\r')) {
Tokenizer tok(line); Tokenizer tok(line);
Tokenizer::iterator it = tok.begin(); Tokenizer::iterator it = tok.begin();
for (++it; it != tok.end(); ++it) for (++it; it != tok.end(); ++it) addFaculty(*it);
addFaculty(*it);
} }
// for all remaining lines // for all remaining lines
@ -50,17 +47,16 @@ namespace gtsam {
Tokenizer::iterator it = tok.begin(); Tokenizer::iterator it = tok.begin();
addSlot(*it++); // add slot addSlot(*it++); // add slot
// add availability // add availability
for (; it != tok.end(); ++it) for (; it != tok.end(); ++it) available_ += (it->empty()) ? "0 " : "1 ";
available_ += (it->empty()) ? "0 " : "1 ";
available_ += '\n'; available_ += '\n';
} }
} // constructor } // constructor
/** addStudent has to be called after adding slots and faculty */ /** addStudent has to be called after adding slots and faculty */
void Scheduler::addStudent(const string& studentName, void Scheduler::addStudent(const string& studentName, const string& area1,
const string& area1, const string& area2, const string& area2, const string& area3,
const string& area3, const string& advisor) { const string& advisor) {
assert(nrStudents()<maxNrStudents_); assert(nrStudents() < maxNrStudents_);
assert(facultyInArea_.count(area1)); assert(facultyInArea_.count(area1));
assert(facultyInArea_.count(area2)); assert(facultyInArea_.count(area2));
assert(facultyInArea_.count(area3)); assert(facultyInArea_.count(area3));
@ -69,71 +65,73 @@ namespace gtsam {
student.name_ = studentName; student.name_ = studentName;
// We fix the ordering by assigning a higher index to the student // We fix the ordering by assigning a higher index to the student
// and numbering the areas lower // and numbering the areas lower
Key j = 3*maxNrStudents_ + nrStudents(); Key j = 3 * maxNrStudents_ + nrStudents();
student.key_ = DiscreteKey(j, nrTimeSlots()); student.key_ = DiscreteKey(j, nrTimeSlots());
Key base = 3*nrStudents(); Key base = 3 * nrStudents();
student.keys_[0] = DiscreteKey(base+0, nrFaculty()); student.keys_[0] = DiscreteKey(base + 0, nrFaculty());
student.keys_[1] = DiscreteKey(base+1, nrFaculty()); student.keys_[1] = DiscreteKey(base + 1, nrFaculty());
student.keys_[2] = DiscreteKey(base+2, nrFaculty()); student.keys_[2] = DiscreteKey(base + 2, nrFaculty());
student.areaName_[0] = area1; student.areaName_[0] = area1;
student.areaName_[1] = area2; student.areaName_[1] = area2;
student.areaName_[2] = area3; student.areaName_[2] = area3;
students_.push_back(student); students_.push_back(student);
} }
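To make this key layout concrete (illustrative numbers only): with maxNrStudents_ = 2, student 0 receives area keys 0, 1, 2 and slot key 3*2 + 0 = 6, while student 1 receives area keys 3, 4, 5 and slot key 3*2 + 1 = 7, so under the default ascending key ordering every area variable is eliminated before any slot variable.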
/** get key for student and area, 0 is time slot itself */ /** get key for student and area, 0 is time slot itself */
const DiscreteKey& Scheduler::key(size_t s, boost::optional<size_t> area) const { const DiscreteKey& Scheduler::key(size_t s,
boost::optional<size_t> area) const {
return area ? students_[s].keys_[*area] : students_[s].key_; return area ? students_[s].keys_[*area] : students_[s].key_;
} }
const string& Scheduler::studentName(size_t i) const { const string& Scheduler::studentName(size_t i) const {
assert(i<nrStudents()); assert(i < nrStudents());
return students_[i].name_; return students_[i].name_;
} }
const DiscreteKey& Scheduler::studentKey(size_t i) const { const DiscreteKey& Scheduler::studentKey(size_t i) const {
assert(i<nrStudents()); assert(i < nrStudents());
return students_[i].key_; return students_[i].key_;
} }
const string& Scheduler::studentArea(size_t i, size_t area) const { const string& Scheduler::studentArea(size_t i, size_t area) const {
assert(i<nrStudents()); assert(i < nrStudents());
return students_[i].areaName_[area]; return students_[i].areaName_[area];
} }
/** Add student-specific constraints to the graph */ /** Add student-specific constraints to the graph */
void Scheduler::addStudentSpecificConstraints(size_t i, boost::optional<size_t> slot) { void Scheduler::addStudentSpecificConstraints(size_t i,
boost::optional<size_t> slot) {
bool debug = ISDEBUG("Scheduler::buildGraph"); bool debug = ISDEBUG("Scheduler::buildGraph");
assert(i<nrStudents()); assert(i < nrStudents());
const Student& s = students_[i]; const Student& s = students_[i];
if (!slot && !slotsAvailable_.empty()) { if (!slot && !slotsAvailable_.empty()) {
if (debug) cout << "Adding availability of slots" << endl; if (debug) cout << "Adding availability of slots" << endl;
assert(slotsAvailable_.size()==s.key_.second); assert(slotsAvailable_.size() == s.key_.second);
CSP::add(s.key_, slotsAvailable_); CSP::add(s.key_, slotsAvailable_);
} }
// For all areas // For all areas
for (size_t area = 0; area < 3; area++) { for (size_t area = 0; area < 3; area++) {
DiscreteKey areaKey = s.keys_[area]; DiscreteKey areaKey = s.keys_[area];
const string& areaName = s.areaName_[area]; const string& areaName = s.areaName_[area];
if (debug) cout << "Area constraints " << areaName << endl; if (debug) cout << "Area constraints " << areaName << endl;
assert(facultyInArea_[areaName].size()==areaKey.second); assert(facultyInArea_[areaName].size() == areaKey.second);
CSP::add(areaKey, facultyInArea_[areaName]); CSP::add(areaKey, facultyInArea_[areaName]);
if (debug) cout << "Advisor constraint " << areaName << endl; if (debug) cout << "Advisor constraint " << areaName << endl;
assert(s.advisor_.size()==areaKey.second); assert(s.advisor_.size() == areaKey.second);
CSP::add(areaKey, s.advisor_); CSP::add(areaKey, s.advisor_);
if (debug) cout << "Availability of faculty " << areaName << endl; if (debug) cout << "Availability of faculty " << areaName << endl;
if (slot) { if (slot) {
// get all constraints then specialize to slot // get all constraints then specialize to slot
size_t dummyIndex = maxNrStudents_*3+maxNrStudents_; size_t dummyIndex = maxNrStudents_ * 3 + maxNrStudents_;
DiscreteKey dummy(dummyIndex, nrTimeSlots()); DiscreteKey dummy(dummyIndex, nrTimeSlots());
Potentials::ADT p(dummy & areaKey, available_); // available_ is Doodle string Potentials::ADT p(dummy & areaKey,
available_); // available_ is Doodle string
Potentials::ADT q = p.choose(dummyIndex, *slot); Potentials::ADT q = p.choose(dummyIndex, *slot);
DiscreteFactor::shared_ptr f(new DecisionTreeFactor(areaKey, q)); DiscreteFactor::shared_ptr f(new DecisionTreeFactor(areaKey, q));
CSP::push_back(f); CSP::push_back(f);
@ -145,25 +143,22 @@ namespace gtsam {
// add mutex // add mutex
if (debug) cout << "Mutex for faculty" << endl; if (debug) cout << "Mutex for faculty" << endl;
addAllDiff(s.keys_[0] & s.keys_[1] & s.keys_[2]); addAllDiff(s.keys_[0] & s.keys_[1] & s.keys_[2]);
} }
/** Main routine that builds factor graph */
/** Main routine that builds factor graph */ void Scheduler::buildGraph(size_t mutexBound) {
void Scheduler::buildGraph(size_t mutexBound) {
bool debug = ISDEBUG("Scheduler::buildGraph"); bool debug = ISDEBUG("Scheduler::buildGraph");
if (debug) cout << "Adding student-specific constraints" << endl; if (debug) cout << "Adding student-specific constraints" << endl;
for (size_t i = 0; i < nrStudents(); i++) for (size_t i = 0; i < nrStudents(); i++) addStudentSpecificConstraints(i);
addStudentSpecificConstraints(i);
// special constraint for MN // special constraint for MN
if (studentName(0) == "Michael N") CSP::add(studentKey(0), if (studentName(0) == "Michael N")
"0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1"); CSP::add(studentKey(0), "0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1");
if (!mutexBound) { if (!mutexBound) {
DiscreteKeys dkeys; DiscreteKeys dkeys;
for(const Student& s: students_) for (const Student& s : students_) dkeys.push_back(s.key_);
dkeys.push_back(s.key_);
addAllDiff(dkeys); addAllDiff(dkeys);
} else { } else {
if (debug) cout << "Mutex for Students" << endl; if (debug) cout << "Mutex for Students" << endl;
@ -175,103 +170,98 @@ namespace gtsam {
} }
} }
} }
} // buildGraph } // buildGraph
/** print */ /** print */
void Scheduler::print(const string& s, const KeyFormatter& formatter) const { void Scheduler::print(const string& s, const KeyFormatter& formatter) const {
cout << s << " Faculty:" << endl; cout << s << " Faculty:" << endl;
for(const string& name: facultyName_) for (const string& name : facultyName_) cout << name << '\n';
cout << name << '\n';
cout << endl; cout << endl;
cout << s << " Slots:\n"; cout << s << " Slots:\n";
size_t i = 0; size_t i = 0;
for(const string& name: slotName_) for (const string& name : slotName_) cout << i++ << " " << name << endl;
cout << i++ << " " << name << endl;
cout << endl; cout << endl;
cout << "Availability:\n" << available_ << '\n'; cout << "Availability:\n" << available_ << '\n';
cout << s << " Area constraints:\n"; cout << s << " Area constraints:\n";
for(const FacultyInArea::value_type& it: facultyInArea_) for (const FacultyInArea::value_type& it : facultyInArea_) {
{
cout << setw(12) << it.first << ": "; cout << setw(12) << it.first << ": ";
for(double v: it.second) for (double v : it.second) cout << v << " ";
cout << v << " ";
cout << '\n'; cout << '\n';
} }
cout << endl; cout << endl;
cout << s << " Students:\n"; cout << s << " Students:\n";
for (const Student& student: students_) for (const Student& student : students_) student.print();
student.print();
cout << endl; cout << endl;
CSP::print(s + " Factor graph"); CSP::print(s + " Factor graph");
cout << endl; cout << endl;
} // print } // print
/** Print readable form of assignment */ /** Print readable form of assignment */
void Scheduler::printAssignment(sharedValues assignment) const { void Scheduler::printAssignment(sharedValues assignment) const {
// Not intended to be general! Assumes very particular ordering ! // Not intended to be general! Assumes very particular ordering !
cout << endl; cout << endl;
for (size_t s = 0; s < nrStudents(); s++) { for (size_t s = 0; s < nrStudents(); s++) {
Key j = 3*maxNrStudents_ + s; Key j = 3 * maxNrStudents_ + s;
size_t slot = assignment->at(j); size_t slot = assignment->at(j);
cout << studentName(s) << " slot: " << slotName_[slot] << endl; cout << studentName(s) << " slot: " << slotName_[slot] << endl;
Key base = 3*s; Key base = 3 * s;
for (size_t area = 0; area < 3; area++) { for (size_t area = 0; area < 3; area++) {
size_t faculty = assignment->at(base+area); size_t faculty = assignment->at(base + area);
cout << setw(12) << studentArea(s,area) << ": " << facultyName_[faculty] cout << setw(12) << studentArea(s, area) << ": " << facultyName_[faculty]
<< endl; << endl;
} }
cout << endl; cout << endl;
} }
} }
/** Special print for single-student case */ /** Special print for single-student case */
void Scheduler::printSpecial(sharedValues assignment) const { void Scheduler::printSpecial(sharedValues assignment) const {
Values::const_iterator it = assignment->begin(); Values::const_iterator it = assignment->begin();
for (size_t area = 0; area < 3; area++, it++) { for (size_t area = 0; area < 3; area++, it++) {
size_t f = it->second; size_t f = it->second;
cout << setw(12) << studentArea(0,area) << ": " << facultyName_[f] << endl; cout << setw(12) << studentArea(0, area) << ": " << facultyName_[f] << endl;
} }
cout << endl; cout << endl;
} }
/** Accumulate faculty stats */ /** Accumulate faculty stats */
void Scheduler::accumulateStats(sharedValues assignment, vector< void Scheduler::accumulateStats(sharedValues assignment,
size_t>& stats) const { vector<size_t>& stats) const {
for (size_t s = 0; s < nrStudents(); s++) { for (size_t s = 0; s < nrStudents(); s++) {
Key base = 3*s; Key base = 3 * s;
for (size_t area = 0; area < 3; area++) { for (size_t area = 0; area < 3; area++) {
size_t f = assignment->at(base+area); size_t f = assignment->at(base + area);
assert(f<stats.size()); assert(f < stats.size());
stats[f]++; stats[f]++;
} // area } // area
} // s } // s
} }
/** Eliminate, return a Bayes net */ /** Eliminate, return a Bayes net */
DiscreteBayesNet::shared_ptr Scheduler::eliminate() const { DiscreteBayesNet::shared_ptr Scheduler::eliminate() const {
gttic(my_eliminate); gttic(my_eliminate);
// TODO: fix this!! // TODO: fix this!!
size_t maxKey = keys().size(); size_t maxKey = keys().size();
Ordering defaultKeyOrdering; Ordering defaultKeyOrdering;
for (size_t i = 0; i<maxKey; ++i) for (size_t i = 0; i < maxKey; ++i) defaultKeyOrdering += Key(i);
defaultKeyOrdering += Key(i); DiscreteBayesNet::shared_ptr chordal =
DiscreteBayesNet::shared_ptr chordal = this->eliminateSequential(defaultKeyOrdering); this->eliminateSequential(defaultKeyOrdering);
gttoc(my_eliminate); gttoc(my_eliminate);
return chordal; return chordal;
} }
/** Find the best total assignment - can be expensive */ /** Find the best total assignment - can be expensive */
Scheduler::sharedValues Scheduler::optimalAssignment() const { Scheduler::sharedValues Scheduler::optimalAssignment() const {
DiscreteBayesNet::shared_ptr chordal = eliminate(); DiscreteBayesNet::shared_ptr chordal = eliminate();
if (ISDEBUG("Scheduler::optimalAssignment")) { if (ISDEBUG("Scheduler::optimalAssignment")) {
DiscreteBayesNet::const_iterator it = chordal->end()-1; DiscreteBayesNet::const_iterator it = chordal->end() - 1;
const Student & student = students_.front(); const Student& student = students_.front();
cout << endl; cout << endl;
(*it)->print(student.name_); (*it)->print(student.name_);
} }
@ -280,23 +270,21 @@ namespace gtsam {
sharedValues mpe = chordal->optimize(); sharedValues mpe = chordal->optimize();
gttoc(my_optimize); gttoc(my_optimize);
return mpe; return mpe;
} }
/** find the assignment of students to slots with most possible committees */ /** find the assignment of students to slots with most possible committees */
Scheduler::sharedValues Scheduler::bestSchedule() const { Scheduler::sharedValues Scheduler::bestSchedule() const {
sharedValues best; sharedValues best;
throw runtime_error("bestSchedule not implemented"); throw runtime_error("bestSchedule not implemented");
return best; return best;
} }
/** find the corresponding most desirable committee assignment */ /** find the corresponding most desirable committee assignment */
Scheduler::sharedValues Scheduler::bestAssignment( Scheduler::sharedValues Scheduler::bestAssignment(
sharedValues bestSchedule) const { sharedValues bestSchedule) const {
sharedValues best; sharedValues best;
throw runtime_error("bestAssignment not implemented"); throw runtime_error("bestAssignment not implemented");
return best; return best;
} }
} // gtsam
} // namespace gtsam


@ -11,17 +11,15 @@
namespace gtsam { namespace gtsam {
/** /**
* Scheduler class * Scheduler class
* Creates one variable for each student, and three variables for each * Creates one variable for each student, and three variables for each
* of the student's areas, for a total of 4*nrStudents variables. * of the student's areas, for a total of 4*nrStudents variables.
* The "student" variable will determine when the student takes the qual. * The "student" variable will determine when the student takes the qual.
* The "area" variables determine which faculty are on his/her committee. * The "area" variables determine which faculty are on his/her committee.
*/ */
class GTSAM_UNSTABLE_EXPORT Scheduler : public CSP { class GTSAM_UNSTABLE_EXPORT Scheduler : public CSP {
private: private:
/** Internal data structure for students */ /** Internal data structure for students */
struct Student { struct Student {
std::string name_; std::string name_;
@ -29,15 +27,14 @@ namespace gtsam {
std::vector<DiscreteKey> keys_; // key for areas std::vector<DiscreteKey> keys_; // key for areas
std::vector<std::string> areaName_; std::vector<std::string> areaName_;
std::vector<double> advisor_; std::vector<double> advisor_;
Student(size_t nrFaculty, size_t advisorIndex) : Student(size_t nrFaculty, size_t advisorIndex)
keys_(3), areaName_(3), advisor_(nrFaculty, 1.0) { : keys_(3), areaName_(3), advisor_(nrFaculty, 1.0) {
advisor_[advisorIndex] = 0.0; advisor_[advisorIndex] = 0.0;
} }
void print() const { void print() const {
using std::cout; using std::cout;
cout << name_ << ": "; cout << name_ << ": ";
for (size_t area = 0; area < 3; area++) for (size_t area = 0; area < 3; area++) cout << areaName_[area] << " ";
cout << areaName_[area] << " ";
cout << std::endl; cout << std::endl;
} }
}; };
@ -63,7 +60,6 @@ namespace gtsam {
std::vector<double> slotsAvailable_; std::vector<double> slotsAvailable_;
public: public:
/** /**
* Constructor * Constructor
* We need to know the number of students in advance for ordering keys. * We need to know the number of students in advance for ordering keys.
@ -79,26 +75,16 @@ namespace gtsam {
facultyName_.push_back(facultyName); facultyName_.push_back(facultyName);
} }
size_t nrFaculty() const { size_t nrFaculty() const { return facultyName_.size(); }
return facultyName_.size();
}
/** boolean std::string of nrTimeSlots * nrFaculty */ /** boolean std::string of nrTimeSlots * nrFaculty */
void setAvailability(const std::string& available) { void setAvailability(const std::string& available) { available_ = available; }
available_ = available;
}
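For illustration (made-up availability data): the string holds one 0/1 entry per (slot, faculty) pair, one row per time slot, matching what the CSV-reading constructor shown earlier in this diff produces. With 2 slots and 4 faculty a hypothetical call might look like this, where `scheduler` is an assumed Scheduler instance and 1 means available:

  scheduler.setAvailability(
      "1 0 1 1 \n"
      "0 1 1 0 \n");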
void addSlot(const std::string& slotName) { void addSlot(const std::string& slotName) { slotName_.push_back(slotName); }
slotName_.push_back(slotName);
}
size_t nrTimeSlots() const { size_t nrTimeSlots() const { return slotName_.size(); }
return slotName_.size();
}
const std::string& slotName(size_t s) const { const std::string& slotName(size_t s) const { return slotName_[s]; }
return slotName_[s];
}
/** slots available, boolean */ /** slots available, boolean */
void setSlotsAvailable(const std::vector<double>& slotsAvailable) { void setSlotsAvailable(const std::vector<double>& slotsAvailable) {
@ -107,7 +93,8 @@ namespace gtsam {
void addArea(const std::string& facultyName, const std::string& areaName) { void addArea(const std::string& facultyName, const std::string& areaName) {
areaName_.push_back(areaName); areaName_.push_back(areaName);
std::vector<double>& table = facultyInArea_[areaName]; // will create if needed std::vector<double>& table =
facultyInArea_[areaName]; // will create if needed
if (table.empty()) table.resize(nrFaculty(), 0); if (table.empty()) table.resize(nrFaculty(), 0);
table[facultyIndex_[facultyName]] = 1; table[facultyIndex_[facultyName]] = 1;
} }
@ -119,7 +106,8 @@ namespace gtsam {
Scheduler(size_t maxNrStudents, const std::string& filename); Scheduler(size_t maxNrStudents, const std::string& filename);
/** get key for student and area, 0 is time slot itself */ /** get key for student and area, 0 is time slot itself */
const DiscreteKey& key(size_t s, boost::optional<size_t> area = boost::none) const; const DiscreteKey& key(size_t s,
boost::optional<size_t> area = boost::none) const;
/** addStudent has to be called after adding slots and faculty */ /** addStudent has to be called after adding slots and faculty */
void addStudent(const std::string& studentName, const std::string& area1, void addStudent(const std::string& studentName, const std::string& area1,
@ -127,16 +115,15 @@ namespace gtsam {
const std::string& advisor); const std::string& advisor);
/// current number of students /// current number of students
size_t nrStudents() const { size_t nrStudents() const { return students_.size(); }
return students_.size();
}
const std::string& studentName(size_t i) const; const std::string& studentName(size_t i) const;
const DiscreteKey& studentKey(size_t i) const; const DiscreteKey& studentKey(size_t i) const;
const std::string& studentArea(size_t i, size_t area) const; const std::string& studentArea(size_t i, size_t area) const;
/** Add student-specific constraints to the graph */ /** Add student-specific constraints to the graph */
void addStudentSpecificConstraints(size_t i, boost::optional<size_t> slot = boost::none); void addStudentSpecificConstraints(
size_t i, boost::optional<size_t> slot = boost::none);
/** Main routine that builds factor graph */ /** Main routine that builds factor graph */
void buildGraph(size_t mutexBound = 7); void buildGraph(size_t mutexBound = 7);
@ -168,8 +155,6 @@ namespace gtsam {
/** find the corresponding most desirable committee assignment */ /** find the corresponding most desirable committee assignment */
sharedValues bestAssignment(sharedValues bestSchedule) const; sharedValues bestAssignment(sharedValues bestSchedule) const;
}; // Scheduler }; // Scheduler
} // gtsam
} // namespace gtsam


@ -5,75 +5,74 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/SingleValue.h>
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam_unstable/discrete/Domain.h>
#include <gtsam_unstable/discrete/SingleValue.h>
#include <boost/make_shared.hpp> #include <boost/make_shared.hpp>
namespace gtsam { namespace gtsam {
using namespace std; using namespace std;
/* ************************************************************************* */ /* ************************************************************************* */
void SingleValue::print(const string& s, void SingleValue::print(const string& s, const KeyFormatter& formatter) const {
const KeyFormatter& formatter) const { cout << s << "SingleValue on "
cout << s << "SingleValue on " << "j=" << formatter(keys_[0]) << "j=" << formatter(keys_[0]) << " with value " << value_ << endl;
<< " with value " << value_ << endl; }
}
/* ************************************************************************* */ /* ************************************************************************* */
double SingleValue::operator()(const Values& values) const { double SingleValue::operator()(const Values& values) const {
return (double) (values.at(keys_[0]) == value_); return (double)(values.at(keys_[0]) == value_);
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor SingleValue::toDecisionTreeFactor() const { DecisionTreeFactor SingleValue::toDecisionTreeFactor() const {
DiscreteKeys keys; DiscreteKeys keys;
keys += DiscreteKey(keys_[0],cardinality_); keys += DiscreteKey(keys_[0], cardinality_);
vector<double> table; vector<double> table;
for (size_t i1 = 0; i1 < cardinality_; i1++) for (size_t i1 = 0; i1 < cardinality_; i1++) table.push_back(i1 == value_);
table.push_back(i1 == value_);
DecisionTreeFactor converted(keys, table); DecisionTreeFactor converted(keys, table);
return converted; return converted;
} }
/* ************************************************************************* */ /* ************************************************************************* */
DecisionTreeFactor SingleValue::operator*(const DecisionTreeFactor& f) const { DecisionTreeFactor SingleValue::operator*(const DecisionTreeFactor& f) const {
// TODO: can we do this more efficiently? // TODO: can we do this more efficiently?
return toDecisionTreeFactor() * f; return toDecisionTreeFactor() * f;
} }
/* ************************************************************************* */ /* ************************************************************************* */
bool SingleValue::ensureArcConsistency(size_t j, bool SingleValue::ensureArcConsistency(size_t j,
vector<Domain>& domains) const { vector<Domain>& domains) const {
if (j != keys_[0]) throw invalid_argument( if (j != keys_[0])
"SingleValue check on wrong domain"); throw invalid_argument("SingleValue check on wrong domain");
Domain& D = domains[j]; Domain& D = domains[j];
if (D.isSingleton()) { if (D.isSingleton()) {
if (D.firstValue() != value_) throw runtime_error("Unsatisfiable"); if (D.firstValue() != value_) throw runtime_error("Unsatisfiable");
return false; return false;
} }
D = Domain(discreteKey(),value_); D = Domain(discreteKey(), value_);
return true; return true;
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr SingleValue::partiallyApply(const Values& values) const { Constraint::shared_ptr SingleValue::partiallyApply(const Values& values) const {
Values::const_iterator it = values.find(keys_[0]); Values::const_iterator it = values.find(keys_[0]);
if (it != values.end() && it->second != value_) throw runtime_error( if (it != values.end() && it->second != value_)
"SingleValue::partiallyApply: unsatisfiable"); throw runtime_error("SingleValue::partiallyApply: unsatisfiable");
return boost::make_shared < SingleValue > (keys_[0], cardinality_, value_); return boost::make_shared<SingleValue>(keys_[0], cardinality_, value_);
} }
/* ************************************************************************* */ /* ************************************************************************* */
Constraint::shared_ptr SingleValue::partiallyApply( Constraint::shared_ptr SingleValue::partiallyApply(
const vector<Domain>& domains) const { const vector<Domain>& domains) const {
const Domain& Dk = domains[keys_[0]]; const Domain& Dk = domains[keys_[0]];
if (Dk.isSingleton() && !Dk.contains(value_)) throw runtime_error( if (Dk.isSingleton() && !Dk.contains(value_))
"SingleValue::partiallyApply: unsatisfiable"); throw runtime_error("SingleValue::partiallyApply: unsatisfiable");
return boost::make_shared < SingleValue > (discreteKey(), value_); return boost::make_shared<SingleValue>(discreteKey(), value_);
} }
/* ************************************************************************* */ /* ************************************************************************* */
} // namespace gtsam } // namespace gtsam


@ -7,16 +7,15 @@
#pragma once #pragma once
#include <gtsam_unstable/discrete/Constraint.h>
#include <gtsam/discrete/DiscreteKey.h> #include <gtsam/discrete/DiscreteKey.h>
#include <gtsam_unstable/discrete/Constraint.h>
namespace gtsam { namespace gtsam {
/** /**
* SingleValue constraint * SingleValue constraint
*/ */
class GTSAM_UNSTABLE_EXPORT SingleValue: public Constraint { class GTSAM_UNSTABLE_EXPORT SingleValue : public Constraint {
/// Number of values /// Number of values
size_t cardinality_; size_t cardinality_;
@ -24,34 +23,31 @@ namespace gtsam {
size_t value_; size_t value_;
DiscreteKey discreteKey() const { DiscreteKey discreteKey() const {
return DiscreteKey(keys_[0],cardinality_); return DiscreteKey(keys_[0], cardinality_);
} }
public: public:
typedef boost::shared_ptr<SingleValue> shared_ptr; typedef boost::shared_ptr<SingleValue> shared_ptr;
/// Constructor /// Constructor
SingleValue(Key key, size_t n, size_t value) : SingleValue(Key key, size_t n, size_t value)
Constraint(key), cardinality_(n), value_(value) { : Constraint(key), cardinality_(n), value_(value) {}
}
/// Constructor /// Constructor
SingleValue(const DiscreteKey& dkey, size_t value) : SingleValue(const DiscreteKey& dkey, size_t value)
Constraint(dkey.first), cardinality_(dkey.second), value_(value) { : Constraint(dkey.first), cardinality_(dkey.second), value_(value) {}
}
// print // print
void print(const std::string& s = "", void print(const std::string& s = "", const KeyFormatter& formatter =
const KeyFormatter& formatter = DefaultKeyFormatter) const override; DefaultKeyFormatter) const override;
/// equals /// equals
bool equals(const DiscreteFactor& other, double tol) const override { bool equals(const DiscreteFactor& other, double tol) const override {
if(!dynamic_cast<const SingleValue*>(&other)) if (!dynamic_cast<const SingleValue*>(&other))
return false; return false;
else { else {
const SingleValue& f(static_cast<const SingleValue&>(other)); const SingleValue& f(static_cast<const SingleValue&>(other));
return (cardinality_==f.cardinality_) && (value_==f.value_); return (cardinality_ == f.cardinality_) && (value_ == f.value_);
} }
} }
@ -69,7 +65,8 @@ namespace gtsam {
* @param j domain to be checked * @param j domain to be checked
* @param domains all other domains * @param domains all other domains
*/ */
bool ensureArcConsistency(size_t j, std::vector<Domain>& domains) const override; bool ensureArcConsistency(size_t j,
std::vector<Domain>& domains) const override;
/// Partially apply known values /// Partially apply known values
Constraint::shared_ptr partiallyApply(const Values& values) const override; Constraint::shared_ptr partiallyApply(const Values& values) const override;
@ -77,6 +74,6 @@ namespace gtsam {
/// Partially apply known values, domain version /// Partially apply known values, domain version
Constraint::shared_ptr partiallyApply( Constraint::shared_ptr partiallyApply(
const std::vector<Domain>& domains) const override; const std::vector<Domain>& domains) const override;
}; };
} // namespace gtsam } // namespace gtsam


@ -7,49 +7,50 @@
#include <gtsam_unstable/discrete/CSP.h> #include <gtsam_unstable/discrete/CSP.h>
#include <gtsam_unstable/discrete/Domain.h> #include <gtsam_unstable/discrete/Domain.h>
#include <boost/assign/std/map.hpp> #include <boost/assign/std/map.hpp>
using boost::assign::insert; using boost::assign::insert;
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <iostream>
#include <fstream> #include <fstream>
#include <iostream>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( BinaryAllDif, allInOne) TEST_UNSAFE(BinaryAllDif, allInOne) {
{
// Create keys and ordering // Create keys and ordering
size_t nrColors = 2; size_t nrColors = 2;
// DiscreteKey ID("Idaho", nrColors), UT("Utah", nrColors), AZ("Arizona", nrColors); // DiscreteKey ID("Idaho", nrColors), UT("Utah", nrColors), AZ("Arizona",
// nrColors);
DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);
// Check construction and conversion // Check construction and conversion
BinaryAllDiff c1(ID, UT); BinaryAllDiff c1(ID, UT);
DecisionTreeFactor f1(ID & UT, "0 1 1 0"); DecisionTreeFactor f1(ID & UT, "0 1 1 0");
EXPECT(assert_equal(f1,c1.toDecisionTreeFactor())); EXPECT(assert_equal(f1, c1.toDecisionTreeFactor()));
// Check construction and conversion // Check construction and conversion
BinaryAllDiff c2(UT, AZ); BinaryAllDiff c2(UT, AZ);
DecisionTreeFactor f2(UT & AZ, "0 1 1 0"); DecisionTreeFactor f2(UT & AZ, "0 1 1 0");
EXPECT(assert_equal(f2,c2.toDecisionTreeFactor())); EXPECT(assert_equal(f2, c2.toDecisionTreeFactor()));
DecisionTreeFactor f3 = f1*f2; DecisionTreeFactor f3 = f1 * f2;
EXPECT(assert_equal(f3,c1*f2)); EXPECT(assert_equal(f3, c1 * f2));
EXPECT(assert_equal(f3,c2*f1)); EXPECT(assert_equal(f3, c2 * f1));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( CSP, allInOne) TEST_UNSAFE(CSP, allInOne) {
{
// Create keys and ordering // Create keys and ordering
size_t nrColors = 2; size_t nrColors = 2;
DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);
// Create the CSP // Create the CSP
CSP csp; CSP csp;
csp.addAllDiff(ID,UT); csp.addAllDiff(ID, UT);
csp.addAllDiff(UT,AZ); csp.addAllDiff(UT, AZ);
// Check an invalid combination, with ID==UT==AZ all same color // Check an invalid combination, with ID==UT==AZ all same color
DiscreteFactor::Values invalid; DiscreteFactor::Values invalid;
@ -69,67 +70,67 @@ TEST_UNSAFE( CSP, allInOne)
DecisionTreeFactor product = csp.product(); DecisionTreeFactor product = csp.product();
// product.dot("product"); // product.dot("product");
DecisionTreeFactor expectedProduct(ID & AZ & UT, "0 1 0 0 0 0 1 0"); DecisionTreeFactor expectedProduct(ID & AZ & UT, "0 1 0 0 0 0 1 0");
EXPECT(assert_equal(expectedProduct,product)); EXPECT(assert_equal(expectedProduct, product));
// Solve // Solve
CSP::sharedValues mpe = csp.optimalAssignment(); CSP::sharedValues mpe = csp.optimalAssignment();
CSP::Values expected; CSP::Values expected;
insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1); insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 1);
EXPECT(assert_equal(expected,*mpe)); EXPECT(assert_equal(expected, *mpe));
EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( CSP, WesternUS) TEST_UNSAFE(CSP, WesternUS) {
{
// Create keys // Create keys
size_t nrColors = 4; size_t nrColors = 4;
DiscreteKey DiscreteKey
// Create ordering according to example in ND-CSP.lyx // Create ordering according to example in ND-CSP.lyx
WA(0, nrColors), OR(3, nrColors), CA(1, nrColors),NV(2, nrColors), WA(0, nrColors),
ID(8, nrColors), UT(9, nrColors), AZ(10, nrColors), OR(3, nrColors), CA(1, nrColors), NV(2, nrColors), ID(8, nrColors),
MT(4, nrColors), WY(5, nrColors), CO(7, nrColors), NM(6, nrColors); UT(9, nrColors), AZ(10, nrColors), MT(4, nrColors), WY(5, nrColors),
CO(7, nrColors), NM(6, nrColors);
// Create the CSP // Create the CSP
CSP csp; CSP csp;
csp.addAllDiff(WA,ID); csp.addAllDiff(WA, ID);
csp.addAllDiff(WA,OR); csp.addAllDiff(WA, OR);
csp.addAllDiff(OR,ID); csp.addAllDiff(OR, ID);
csp.addAllDiff(OR,CA); csp.addAllDiff(OR, CA);
csp.addAllDiff(OR,NV); csp.addAllDiff(OR, NV);
csp.addAllDiff(CA,NV); csp.addAllDiff(CA, NV);
csp.addAllDiff(CA,AZ); csp.addAllDiff(CA, AZ);
csp.addAllDiff(ID,MT); csp.addAllDiff(ID, MT);
csp.addAllDiff(ID,WY); csp.addAllDiff(ID, WY);
csp.addAllDiff(ID,UT); csp.addAllDiff(ID, UT);
csp.addAllDiff(ID,NV); csp.addAllDiff(ID, NV);
csp.addAllDiff(NV,UT); csp.addAllDiff(NV, UT);
csp.addAllDiff(NV,AZ); csp.addAllDiff(NV, AZ);
csp.addAllDiff(UT,WY); csp.addAllDiff(UT, WY);
csp.addAllDiff(UT,CO); csp.addAllDiff(UT, CO);
csp.addAllDiff(UT,NM); csp.addAllDiff(UT, NM);
csp.addAllDiff(UT,AZ); csp.addAllDiff(UT, AZ);
csp.addAllDiff(AZ,CO); csp.addAllDiff(AZ, CO);
csp.addAllDiff(AZ,NM); csp.addAllDiff(AZ, NM);
csp.addAllDiff(MT,WY); csp.addAllDiff(MT, WY);
csp.addAllDiff(WY,CO); csp.addAllDiff(WY, CO);
csp.addAllDiff(CO,NM); csp.addAllDiff(CO, NM);
// Solve // Solve
Ordering ordering; Ordering ordering;
ordering += Key(0),Key(1),Key(2),Key(3),Key(4),Key(5),Key(6),Key(7),Key(8),Key(9),Key(10); ordering += Key(0), Key(1), Key(2), Key(3), Key(4), Key(5), Key(6), Key(7),
Key(8), Key(9), Key(10);
CSP::sharedValues mpe = csp.optimalAssignment(ordering); CSP::sharedValues mpe = csp.optimalAssignment(ordering);
// GTSAM_PRINT(*mpe); // GTSAM_PRINT(*mpe);
CSP::Values expected; CSP::Values expected;
insert(expected) insert(expected)(WA.first, 1)(CA.first, 1)(NV.first, 3)(OR.first, 0)(
(WA.first,1)(CA.first,1)(NV.first,3)(OR.first,0) MT.first, 1)(WY.first, 0)(NM.first, 3)(CO.first, 2)(ID.first, 2)(
(MT.first,1)(WY.first,0)(NM.first,3)(CO.first,2) UT.first, 1)(AZ.first, 0);
(ID.first,2)(UT.first,1)(AZ.first,0);
// TODO: Fix me! mpe result seems to be right. (See the printing) // TODO: Fix me! mpe result seems to be right. (See the printing)
// It has the same prob as the expected solution. // It has the same prob as the expected solution.
// Is mpe another solution, or is the expected solution unique? // Is mpe another solution, or is the expected solution unique?
EXPECT(assert_equal(expected,*mpe)); EXPECT(assert_equal(expected, *mpe));
EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);
// Write out the dual graph for hmetis // Write out the dual graph for hmetis
@ -142,8 +143,7 @@ TEST_UNSAFE( CSP, WesternUS)
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( CSP, AllDiff) TEST_UNSAFE(CSP, AllDiff) {
{
// Create keys and ordering // Create keys and ordering
size_t nrColors = 3; size_t nrColors = 3;
DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors); DiscreteKey ID(0, nrColors), UT(2, nrColors), AZ(1, nrColors);
@ -151,24 +151,25 @@ TEST_UNSAFE( CSP, AllDiff)
// Create the CSP // Create the CSP
CSP csp; CSP csp;
vector<DiscreteKey> dkeys; vector<DiscreteKey> dkeys;
dkeys += ID,UT,AZ; dkeys += ID, UT, AZ;
csp.addAllDiff(dkeys); csp.addAllDiff(dkeys);
csp.addSingleValue(AZ,2); csp.addSingleValue(AZ, 2);
// GTSAM_PRINT(csp); // GTSAM_PRINT(csp);
// Check construction and conversion // Check construction and conversion
SingleValue s(AZ,2); SingleValue s(AZ, 2);
DecisionTreeFactor f1(AZ,"0 0 1"); DecisionTreeFactor f1(AZ, "0 0 1");
EXPECT(assert_equal(f1,s.toDecisionTreeFactor())); EXPECT(assert_equal(f1, s.toDecisionTreeFactor()));
// Check construction and conversion // Check construction and conversion
AllDiff alldiff(dkeys); AllDiff alldiff(dkeys);
DecisionTreeFactor actual = alldiff.toDecisionTreeFactor(); DecisionTreeFactor actual = alldiff.toDecisionTreeFactor();
// GTSAM_PRINT(actual); // GTSAM_PRINT(actual);
// actual.dot("actual"); // actual.dot("actual");
DecisionTreeFactor f2(ID & AZ & UT, DecisionTreeFactor f2(
ID & AZ & UT,
"0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0"); "0 0 0 0 0 1 0 1 0 0 0 1 0 0 0 1 0 0 0 1 0 1 0 0 0 0 0");
EXPECT(assert_equal(f2,actual)); EXPECT(assert_equal(f2, actual));
// Check an invalid combination, with ID==UT==AZ all same color // Check an invalid combination, with ID==UT==AZ all same color
DiscreteFactor::Values invalid; DiscreteFactor::Values invalid;
@ -188,36 +189,36 @@ TEST_UNSAFE( CSP, AllDiff)
CSP::sharedValues mpe = csp.optimalAssignment(); CSP::sharedValues mpe = csp.optimalAssignment();
CSP::Values expected; CSP::Values expected;
insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2); insert(expected)(ID.first, 1)(UT.first, 0)(AZ.first, 2);
EXPECT(assert_equal(expected,*mpe)); EXPECT(assert_equal(expected, *mpe));
EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9); EXPECT_DOUBLES_EQUAL(1, csp(*mpe), 1e-9);
// Arc-consistency // Arc-consistency
vector<Domain> domains; vector<Domain> domains;
domains += Domain(ID), Domain(AZ), Domain(UT); domains += Domain(ID), Domain(AZ), Domain(UT);
SingleValue singleValue(AZ,2); SingleValue singleValue(AZ, 2);
EXPECT(singleValue.ensureArcConsistency(1,domains)); EXPECT(singleValue.ensureArcConsistency(1, domains));
EXPECT(alldiff.ensureArcConsistency(0,domains)); EXPECT(alldiff.ensureArcConsistency(0, domains));
EXPECT(!alldiff.ensureArcConsistency(1,domains)); EXPECT(!alldiff.ensureArcConsistency(1, domains));
EXPECT(alldiff.ensureArcConsistency(2,domains)); EXPECT(alldiff.ensureArcConsistency(2, domains));
LONGS_EQUAL(2,domains[0].nrValues()); LONGS_EQUAL(2, domains[0].nrValues());
LONGS_EQUAL(1,domains[1].nrValues()); LONGS_EQUAL(1, domains[1].nrValues());
LONGS_EQUAL(2,domains[2].nrValues()); LONGS_EQUAL(2, domains[2].nrValues());
// Partial application, version 1 // Partial application, version 1
DiscreteFactor::Values known; DiscreteFactor::Values known;
known[AZ.first] = 2; known[AZ.first] = 2;
DiscreteFactor::shared_ptr reduced1 = alldiff.partiallyApply(known); DiscreteFactor::shared_ptr reduced1 = alldiff.partiallyApply(known);
DecisionTreeFactor f3(ID & UT, "0 1 1 1 0 1 1 1 0"); DecisionTreeFactor f3(ID & UT, "0 1 1 1 0 1 1 1 0");
EXPECT(assert_equal(f3,reduced1->toDecisionTreeFactor())); EXPECT(assert_equal(f3, reduced1->toDecisionTreeFactor()));
DiscreteFactor::shared_ptr reduced2 = singleValue.partiallyApply(known); DiscreteFactor::shared_ptr reduced2 = singleValue.partiallyApply(known);
DecisionTreeFactor f4(AZ, "0 0 1"); DecisionTreeFactor f4(AZ, "0 0 1");
EXPECT(assert_equal(f4,reduced2->toDecisionTreeFactor())); EXPECT(assert_equal(f4, reduced2->toDecisionTreeFactor()));
// Partial application, version 2 // Partial application, version 2
DiscreteFactor::shared_ptr reduced3 = alldiff.partiallyApply(domains); DiscreteFactor::shared_ptr reduced3 = alldiff.partiallyApply(domains);
EXPECT(assert_equal(f3,reduced3->toDecisionTreeFactor())); EXPECT(assert_equal(f3, reduced3->toDecisionTreeFactor()));
DiscreteFactor::shared_ptr reduced4 = singleValue.partiallyApply(domains); DiscreteFactor::shared_ptr reduced4 = singleValue.partiallyApply(domains);
EXPECT(assert_equal(f4,reduced4->toDecisionTreeFactor())); EXPECT(assert_equal(f4, reduced4->toDecisionTreeFactor()));
// full arc-consistency test // full arc-consistency test
csp.runArcConsistency(nrColors); csp.runArcConsistency(nrColors);
@ -229,4 +230,3 @@ int main() {
return TestRegistry::runAllTests(tr); return TestRegistry::runAllTests(tr);
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -5,14 +5,15 @@
* @date Oct 11, 2013 * @date Oct 11, 2013
*/ */
#include <gtsam/inference/VariableIndex.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/discrete/DecisionTreeFactor.h> #include <gtsam/discrete/DecisionTreeFactor.h>
#include <gtsam/discrete/DiscreteFactorGraph.h> #include <gtsam/discrete/DiscreteFactorGraph.h>
#include <CppUnitLite/TestHarness.h> #include <gtsam/inference/VariableIndex.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/assign/list_of.hpp> #include <boost/assign/list_of.hpp>
#include <iostream> #include <boost/range/adaptor/map.hpp>
#include <fstream> #include <fstream>
#include <iostream>
using namespace std; using namespace std;
using namespace boost; using namespace boost;
@ -23,11 +24,12 @@ using namespace gtsam;
* Loopy belief solver for graphs with only binary and unary factors * Loopy belief solver for graphs with only binary and unary factors
*/ */
class LoopyBelief { class LoopyBelief {
/** Star graph struct for each node, containing /** Star graph struct for each node, containing
* - the star graph itself * - the star graph itself
* - the product of original unary factors so we don't have to recompute it later, and * - the product of original unary factors so we don't have to recompute it
* - the factor indices of the corrected belief factors of the neighboring nodes * later, and
* - the factor indices of the corrected belief factors of the neighboring
* nodes
*/ */
typedef std::map<Key, size_t> CorrectedBeliefIndices; typedef std::map<Key, size_t> CorrectedBeliefIndices;
struct StarGraph { struct StarGraph {
@ -37,40 +39,40 @@ class LoopyBelief {
VariableIndex varIndex_; VariableIndex varIndex_;
StarGraph(const DiscreteFactorGraph::shared_ptr& _star, StarGraph(const DiscreteFactorGraph::shared_ptr& _star,
const CorrectedBeliefIndices& _beliefIndices, const CorrectedBeliefIndices& _beliefIndices,
const DecisionTreeFactor::shared_ptr& _unary) : const DecisionTreeFactor::shared_ptr& _unary)
star(_star), correctedBeliefIndices(_beliefIndices), unary(_unary), varIndex_( : star(_star),
*_star) { correctedBeliefIndices(_beliefIndices),
} unary(_unary),
varIndex_(*_star) {}
void print(const std::string& s = "") const { void print(const std::string& s = "") const {
cout << s << ":" << endl; cout << s << ":" << endl;
star->print("Star graph: "); star->print("Star graph: ");
for(Key key: correctedBeliefIndices | boost::adaptors::map_keys) { for (Key key : correctedBeliefIndices | boost::adaptors::map_keys) {
cout << "Belief factor index for " << key << ": " cout << "Belief factor index for " << key << ": "
<< correctedBeliefIndices.at(key) << endl; << correctedBeliefIndices.at(key) << endl;
} }
if (unary) if (unary) unary->print("Unary: ");
unary->print("Unary: ");
} }
}; };
typedef std::map<Key, StarGraph> StarGraphs; typedef std::map<Key, StarGraph> StarGraphs;
StarGraphs starGraphs_; ///< star graph at each variable StarGraphs starGraphs_; ///< star graph at each variable
public: public:
/** Constructor /** Constructor
* Need all discrete keys to access node's cardinality for creating belief factors * Need all discrete keys to access node's cardinality for creating belief
* factors
* TODO: so troublesome!! * TODO: so troublesome!!
*/ */
LoopyBelief(const DiscreteFactorGraph& graph, LoopyBelief(const DiscreteFactorGraph& graph,
const std::map<Key, DiscreteKey>& allDiscreteKeys) : const std::map<Key, DiscreteKey>& allDiscreteKeys)
starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) { : starGraphs_(buildStarGraphs(graph, allDiscreteKeys)) {}
}
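A minimal driver sketch for this class (the factor graph contents, key numbering, and sweep count are hypothetical; LoopyBelief, iterate, and the gtsam types are taken from the code above):

  DiscreteFactorGraph graph;
  std::map<Key, DiscreteKey> allKeys;
  allKeys.insert(std::make_pair(0, DiscreteKey(0, 2)));
  allKeys.insert(std::make_pair(1, DiscreteKey(1, 2)));
  graph.add(allKeys.at(0), "1 2");                      // unary factor on key 0
  graph.add(allKeys.at(0) & allKeys.at(1), "1 2 2 1");  // binary factor
  LoopyBelief solver(graph, allKeys);
  for (size_t sweep = 0; sweep < 10; ++sweep) {
    DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys);
    if (sweep == 9) beliefs->print("beliefs after 10 sweeps");
  }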
/// print /// print
void print(const std::string& s = "") const { void print(const std::string& s = "") const {
cout << s << ":" << endl; cout << s << ":" << endl;
for(Key key: starGraphs_ | boost::adaptors::map_keys) { for (Key key : starGraphs_ | boost::adaptors::map_keys) {
starGraphs_.at(key).print((boost::format("Node %d:") % key).str()); starGraphs_.at(key).print((boost::format("Node %d:") % key).str());
} }
} }
@ -79,12 +81,13 @@ public:
DiscreteFactorGraph::shared_ptr iterate( DiscreteFactorGraph::shared_ptr iterate(
const std::map<Key, DiscreteKey>& allDiscreteKeys) { const std::map<Key, DiscreteKey>& allDiscreteKeys) {
static const bool debug = false; static const bool debug = false;
static DiscreteConditional::shared_ptr dummyCond; // unused by-product of elimination static DiscreteConditional::shared_ptr
dummyCond; // unused by-product of elimination
DiscreteFactorGraph::shared_ptr beliefs(new DiscreteFactorGraph()); DiscreteFactorGraph::shared_ptr beliefs(new DiscreteFactorGraph());
std::map<Key, std::map<Key, DiscreteFactor::shared_ptr> > allMessages; std::map<Key, std::map<Key, DiscreteFactor::shared_ptr> > allMessages;
// Eliminate each star graph // Eliminate each star graph
for(Key key: starGraphs_ | boost::adaptors::map_keys) { for (Key key : starGraphs_ | boost::adaptors::map_keys) {
// cout << "***** Node " << key << "*****" << endl; // cout << "***** Node " << key << "*****" << endl;
// initialize belief to the unary factor from the original graph // initialize belief to the unary factor from the original graph
DecisionTreeFactor::shared_ptr beliefAtKey; DecisionTreeFactor::shared_ptr beliefAtKey;
@ -92,15 +95,16 @@ public:
std::map<Key, DiscreteFactor::shared_ptr> messages; std::map<Key, DiscreteFactor::shared_ptr> messages;
// eliminate each neighbor in this star graph one by one // eliminate each neighbor in this star graph one by one
for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) { for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices |
boost::adaptors::map_keys) {
DiscreteFactorGraph subGraph; DiscreteFactorGraph subGraph;
for(size_t factor: starGraphs_.at(key).varIndex_[neighbor]) { for (size_t factor : starGraphs_.at(key).varIndex_[neighbor]) {
subGraph.push_back(starGraphs_.at(key).star->at(factor)); subGraph.push_back(starGraphs_.at(key).star->at(factor));
} }
if (debug) subGraph.print("------- Subgraph:"); if (debug) subGraph.print("------- Subgraph:");
DiscreteFactor::shared_ptr message; DiscreteFactor::shared_ptr message;
boost::tie(dummyCond, message) = EliminateDiscrete(subGraph, boost::tie(dummyCond, message) =
Ordering(list_of(neighbor))); EliminateDiscrete(subGraph, Ordering(list_of(neighbor)));
// store the new factor into messages // store the new factor into messages
messages.insert(make_pair(neighbor, message)); messages.insert(make_pair(neighbor, message));
if (debug) message->print("------- Message: "); if (debug) message->print("------- Message: ");
@ -108,14 +112,12 @@ public:
// Belief is the product of all messages and the unary factor // Belief is the product of all messages and the unary factor
// Incorporate new the factor to belief // Incorporate new the factor to belief
if (!beliefAtKey) if (!beliefAtKey)
beliefAtKey = boost::dynamic_pointer_cast<DecisionTreeFactor>(
message);
else
beliefAtKey = beliefAtKey =
boost::make_shared<DecisionTreeFactor>( boost::dynamic_pointer_cast<DecisionTreeFactor>(message);
(*beliefAtKey) else
* (*boost::dynamic_pointer_cast<DecisionTreeFactor>( beliefAtKey = boost::make_shared<DecisionTreeFactor>(
message))); (*beliefAtKey) *
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(message)));
} }
if (starGraphs_.at(key).unary) if (starGraphs_.at(key).unary)
beliefAtKey = boost::make_shared<DecisionTreeFactor>( beliefAtKey = boost::make_shared<DecisionTreeFactor>(
@ -133,7 +135,8 @@ public:
sumFactorTable = (boost::format("%s %f") % sumFactorTable % sum).str(); sumFactorTable = (boost::format("%s %f") % sumFactorTable % sum).str();
DecisionTreeFactor sumFactor(allDiscreteKeys.at(key), sumFactorTable); DecisionTreeFactor sumFactor(allDiscreteKeys.at(key), sumFactorTable);
if (debug) sumFactor.print("denomFactor: "); if (debug) sumFactor.print("denomFactor: ");
beliefAtKey = boost::make_shared<DecisionTreeFactor>((*beliefAtKey) / sumFactor); beliefAtKey =
boost::make_shared<DecisionTreeFactor>((*beliefAtKey) / sumFactor);
if (debug) beliefAtKey->print("New belief at key normalized: "); if (debug) beliefAtKey->print("New belief at key normalized: ");
beliefs->push_back(beliefAtKey); beliefs->push_back(beliefAtKey);
allMessages[key] = messages; allMessages[key] = messages;
@ -141,17 +144,20 @@ public:
// Update corrected beliefs // Update corrected beliefs
VariableIndex beliefFactors(*beliefs); VariableIndex beliefFactors(*beliefs);
for(Key key: starGraphs_ | boost::adaptors::map_keys) { for (Key key : starGraphs_ | boost::adaptors::map_keys) {
std::map<Key, DiscreteFactor::shared_ptr> messages = allMessages[key]; std::map<Key, DiscreteFactor::shared_ptr> messages = allMessages[key];
for(Key neighbor: starGraphs_.at(key).correctedBeliefIndices | boost::adaptors::map_keys) { for (Key neighbor : starGraphs_.at(key).correctedBeliefIndices |
DecisionTreeFactor correctedBelief = (*boost::dynamic_pointer_cast< boost::adaptors::map_keys) {
DecisionTreeFactor>(beliefs->at(beliefFactors[key].front()))) DecisionTreeFactor correctedBelief =
/ (*boost::dynamic_pointer_cast<DecisionTreeFactor>( (*boost::dynamic_pointer_cast<DecisionTreeFactor>(
beliefs->at(beliefFactors[key].front()))) /
(*boost::dynamic_pointer_cast<DecisionTreeFactor>(
messages.at(neighbor))); messages.at(neighbor)));
if (debug) correctedBelief.print("correctedBelief: "); if (debug) correctedBelief.print("correctedBelief: ");
size_t beliefIndex = starGraphs_.at(neighbor).correctedBeliefIndices.at( size_t beliefIndex =
key); starGraphs_.at(neighbor).correctedBeliefIndices.at(key);
starGraphs_.at(neighbor).star->replace(beliefIndex, starGraphs_.at(neighbor).star->replace(
beliefIndex,
boost::make_shared<DecisionTreeFactor>(correctedBelief)); boost::make_shared<DecisionTreeFactor>(correctedBelief));
} }
} }
@ -161,21 +167,22 @@ public:
return beliefs; return beliefs;
} }
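In words (standard loopy-BP bookkeeping; the notation is mine, not from the code): the belief at node i computed above is the normalized product of node i's original unary factor with the messages m_{j->i} obtained by eliminating each neighbor j out of node i's star graph, and the "corrected belief" written back into neighbor j's star graph is that belief divided by m_{j->i}, which is exactly the division by messages.at(neighbor) in the update loop above.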
private: private:
/** /**
* Build star graphs for each node. * Build star graphs for each node.
*/ */
StarGraphs buildStarGraphs(const DiscreteFactorGraph& graph, StarGraphs buildStarGraphs(
const DiscreteFactorGraph& graph,
const std::map<Key, DiscreteKey>& allDiscreteKeys) const { const std::map<Key, DiscreteKey>& allDiscreteKeys) const {
StarGraphs starGraphs; StarGraphs starGraphs;
VariableIndex varIndex(graph); ///< access to all factors of each node VariableIndex varIndex(graph); ///< access to all factors of each node
for(Key key: varIndex | boost::adaptors::map_keys) { for (Key key : varIndex | boost::adaptors::map_keys) {
// initialize to multiply with other unary factors later // initialize to multiply with other unary factors later
DecisionTreeFactor::shared_ptr prodOfUnaries; DecisionTreeFactor::shared_ptr prodOfUnaries;
// collect all factors involving this key in the original graph // collect all factors involving this key in the original graph
DiscreteFactorGraph::shared_ptr star(new DiscreteFactorGraph()); DiscreteFactorGraph::shared_ptr star(new DiscreteFactorGraph());
for(size_t factorIndex: varIndex[key]) { for (size_t factorIndex : varIndex[key]) {
star->push_back(graph.at(factorIndex)); star->push_back(graph.at(factorIndex));
// accumulate unary factors // accumulate unary factors
@ -185,8 +192,8 @@ private:
graph.at(factorIndex)); graph.at(factorIndex));
else else
prodOfUnaries = boost::make_shared<DecisionTreeFactor>( prodOfUnaries = boost::make_shared<DecisionTreeFactor>(
*prodOfUnaries *prodOfUnaries *
* (*boost::dynamic_pointer_cast<DecisionTreeFactor>( (*boost::dynamic_pointer_cast<DecisionTreeFactor>(
graph.at(factorIndex)))); graph.at(factorIndex))));
} }
} }
@ -196,7 +203,7 @@ private:
KeySet neighbors = star->keys(); KeySet neighbors = star->keys();
neighbors.erase(key); neighbors.erase(key);
CorrectedBeliefIndices correctedBeliefIndices; CorrectedBeliefIndices correctedBeliefIndices;
for(Key neighbor: neighbors) { for (Key neighbor : neighbors) {
// TODO: default table for keys with more than 2 values? // TODO: default table for keys with more than 2 values?
string initialBelief; string initialBelief;
for (size_t v = 0; v < allDiscreteKeys.at(neighbor).second - 1; ++v) { for (size_t v = 0; v < allDiscreteKeys.at(neighbor).second - 1; ++v) {
@ -207,9 +214,8 @@ private:
DecisionTreeFactor(allDiscreteKeys.at(neighbor), initialBelief)); DecisionTreeFactor(allDiscreteKeys.at(neighbor), initialBelief));
correctedBeliefIndices.insert(make_pair(neighbor, star->size() - 1)); correctedBeliefIndices.insert(make_pair(neighbor, star->size() - 1));
} }
starGraphs.insert( starGraphs.insert(make_pair(
make_pair(key, key, StarGraph(star, correctedBeliefIndices, prodOfUnaries)));
StarGraph(star, correctedBeliefIndices, prodOfUnaries)));
} }
return starGraphs; return starGraphs;
} }
@ -249,7 +255,6 @@ TEST_UNSAFE(LoopyBelief, construction) {
DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys); DiscreteFactorGraph::shared_ptr beliefs = solver.iterate(allKeys);
beliefs->print(); beliefs->print();
} }
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -5,14 +5,13 @@
*/ */
//#define ENABLE_TIMING //#define ENABLE_TIMING
#include <gtsam_unstable/discrete/Scheduler.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam/base/Testable.h> #include <gtsam/base/Testable.h>
#include <gtsam/base/timing.h> #include <gtsam/base/timing.h>
#include <gtsam_unstable/discrete/Scheduler.h>
#include <CppUnitLite/TestHarness.h>
#include <boost/assign/std/vector.hpp>
#include <boost/assign/std/map.hpp> #include <boost/assign/std/map.hpp>
#include <boost/assign/std/vector.hpp>
#include <boost/optional.hpp> #include <boost/optional.hpp>
using namespace boost::assign; using namespace boost::assign;
@ -22,7 +21,6 @@ using namespace gtsam;
/* ************************************************************************* */ /* ************************************************************************* */
// Create the expected graph of constraints // Create the expected graph of constraints
DiscreteFactorGraph createExpected() { DiscreteFactorGraph createExpected() {
// Start building // Start building
size_t nrFaculty = 4, nrTimeSlots = 3; size_t nrFaculty = 4, nrTimeSlots = 3;
@ -79,8 +77,7 @@ DiscreteFactorGraph createExpected() {
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( schedulingExample, test) TEST(schedulingExample, test) {
{
Scheduler s(2); Scheduler s(2);
// add faculty // add faculty
@ -121,7 +118,7 @@ TEST( schedulingExample, test)
// Do brute force product and output that to file // Do brute force product and output that to file
DecisionTreeFactor product = s.product(); DecisionTreeFactor product = s.product();
//product.dot("scheduling", false); // product.dot("scheduling", false);
// Do exact inference // Do exact inference
gttic(small); gttic(small);
@ -129,25 +126,24 @@ TEST( schedulingExample, test)
gttoc(small); gttoc(small);
// print MPE, commented out as unit tests don't print // print MPE, commented out as unit tests don't print
// s.printAssignment(MPE); // s.printAssignment(MPE);
// Commented out as does not work yet // Commented out as does not work yet
// s.runArcConsistency(8,10,true); // s.runArcConsistency(8,10,true);
// find the assignment of students to slots with most possible committees // find the assignment of students to slots with most possible committees
// Commented out as not implemented yet // Commented out as not implemented yet
// sharedValues bestSchedule = s.bestSchedule(); // sharedValues bestSchedule = s.bestSchedule();
// GTSAM_PRINT(*bestSchedule); // GTSAM_PRINT(*bestSchedule);
// find the corresponding most desirable committee assignment // find the corresponding most desirable committee assignment
// Commented out as not implemented yet // Commented out as not implemented yet
// sharedValues bestAssignment = s.bestAssignment(bestSchedule); // sharedValues bestAssignment = s.bestAssignment(bestSchedule);
// GTSAM_PRINT(*bestAssignment); // GTSAM_PRINT(*bestAssignment);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST( schedulingExample, smallFromFile) TEST(schedulingExample, smallFromFile) {
{
string path(TOPSRCDIR "/gtsam_unstable/discrete/examples/"); string path(TOPSRCDIR "/gtsam_unstable/discrete/examples/");
Scheduler s(2, path + "small.csv"); Scheduler s(2, path + "small.csv");
@ -179,4 +175,3 @@ int main() {
return TestRegistry::runAllTests(tr); return TestRegistry::runAllTests(tr);
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -5,21 +5,22 @@
* @author Frank Dellaert * @author Frank Dellaert
*/ */
#include <gtsam_unstable/discrete/CSP.h>
#include <CppUnitLite/TestHarness.h> #include <CppUnitLite/TestHarness.h>
#include <gtsam_unstable/discrete/CSP.h>
#include <boost/assign/std/map.hpp> #include <boost/assign/std/map.hpp>
using boost::assign::insert; using boost::assign::insert;
#include <stdarg.h>
#include <iostream> #include <iostream>
#include <sstream> #include <sstream>
#include <stdarg.h>
using namespace std; using namespace std;
using namespace gtsam; using namespace gtsam;
#define PRINT false #define PRINT false
class Sudoku: public CSP { class Sudoku : public CSP {
/// sudoku size /// sudoku size
size_t n_; size_t n_;
@ -27,25 +28,21 @@ class Sudoku: public CSP {
typedef std::pair<size_t, size_t> IJ; typedef std::pair<size_t, size_t> IJ;
std::map<IJ, DiscreteKey> dkeys_; std::map<IJ, DiscreteKey> dkeys_;
public: public:
/// return DiscreteKey for cell(i,j) /// return DiscreteKey for cell(i,j)
const DiscreteKey& dkey(size_t i, size_t j) const { const DiscreteKey& dkey(size_t i, size_t j) const {
return dkeys_.at(IJ(i, j)); return dkeys_.at(IJ(i, j));
} }
/// return Key for cell(i,j) /// return Key for cell(i,j)
Key key(size_t i, size_t j) const { Key key(size_t i, size_t j) const { return dkey(i, j).first; }
return dkey(i, j).first;
}
/// Constructor /// Constructor
Sudoku(size_t n, ...) : Sudoku(size_t n, ...) : n_(n) {
n_(n) {
// Create variables, ordering, and unary constraints // Create variables, ordering, and unary constraints
va_list ap; va_list ap;
va_start(ap, n); va_start(ap, n);
Key k=0; Key k = 0;
for (size_t i = 0; i < n; ++i) { for (size_t i = 0; i < n; ++i) {
for (size_t j = 0; j < n; ++j, ++k) { for (size_t j = 0; j < n; ++j, ++k) {
// create the key // create the key
@ -56,23 +53,21 @@ public:
// cout << value << " "; // cout << value << " ";
if (value != 0) addSingleValue(dkeys_[ij], value - 1); if (value != 0) addSingleValue(dkeys_[ij], value - 1);
} }
//cout << endl; // cout << endl;
} }
va_end(ap); va_end(ap);
// add row constraints // add row constraints
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
DiscreteKeys dkeys; DiscreteKeys dkeys;
for (size_t j = 0; j < n; j++) for (size_t j = 0; j < n; j++) dkeys += dkey(i, j);
dkeys += dkey(i, j);
addAllDiff(dkeys); addAllDiff(dkeys);
} }
// add col constraints // add col constraints
for (size_t j = 0; j < n; j++) { for (size_t j = 0; j < n; j++) {
DiscreteKeys dkeys; DiscreteKeys dkeys;
for (size_t i = 0; i < n; i++) for (size_t i = 0; i < n; i++) dkeys += dkey(i, j);
dkeys += dkey(i, j);
addAllDiff(dkeys); addAllDiff(dkeys);
} }
@ -84,8 +79,7 @@ public:
// Box I,J // Box I,J
DiscreteKeys dkeys; DiscreteKeys dkeys;
for (size_t i = i0; i < i0 + N; i++) for (size_t i = i0; i < i0 + N; i++)
for (size_t j = j0; j < j0 + N; j++) for (size_t j = j0; j < j0 + N; j++) dkeys += dkey(i, j);
dkeys += dkey(i, j);
addAllDiff(dkeys); addAllDiff(dkeys);
j0 += N; j0 += N;
} }
@ -109,74 +103,66 @@ public:
DiscreteFactor::sharedValues MPE = optimalAssignment(); DiscreteFactor::sharedValues MPE = optimalAssignment();
printAssignment(MPE); printAssignment(MPE);
} }
}; };
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( Sudoku, small) TEST_UNSAFE(Sudoku, small) {
{ Sudoku csp(4, //
Sudoku csp(4, 1, 0, 0, 4, //
1,0, 0,4, 0, 0, 0, 0, //
0,0, 0,0, 4, 0, 2, 0, //
0, 1, 0, 0);
4,0, 2,0,
0,1, 0,0);
// Do BP // Do BP
csp.runArcConsistency(4,10,PRINT); csp.runArcConsistency(4, 10, PRINT);
// optimize and check // optimize and check
CSP::sharedValues solution = csp.optimalAssignment(); CSP::sharedValues solution = csp.optimalAssignment();
CSP::Values expected; CSP::Values expected;
insert(expected) insert(expected)(csp.key(0, 0), 0)(csp.key(0, 1), 1)(csp.key(0, 2), 2)(
(csp.key(0,0), 0)(csp.key(0,1), 1)(csp.key(0,2), 2)(csp.key(0,3), 3) csp.key(0, 3), 3)(csp.key(1, 0), 2)(csp.key(1, 1), 3)(csp.key(1, 2), 0)(
(csp.key(1,0), 2)(csp.key(1,1), 3)(csp.key(1,2), 0)(csp.key(1,3), 1) csp.key(1, 3), 1)(csp.key(2, 0), 3)(csp.key(2, 1), 2)(csp.key(2, 2), 1)(
(csp.key(2,0), 3)(csp.key(2,1), 2)(csp.key(2,2), 1)(csp.key(2,3), 0) csp.key(2, 3), 0)(csp.key(3, 0), 1)(csp.key(3, 1), 0)(csp.key(3, 2), 3)(
(csp.key(3,0), 1)(csp.key(3,1), 0)(csp.key(3,2), 3)(csp.key(3,3), 2); csp.key(3, 3), 2);
EXPECT(assert_equal(expected,*solution)); EXPECT(assert_equal(expected, *solution));
//csp.printAssignment(solution); // csp.printAssignment(solution);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( Sudoku, easy) TEST_UNSAFE(Sudoku, easy) {
{ Sudoku sudoku(9, //
Sudoku sudoku(9, 0, 0, 5, 0, 9, 0, 0, 0, 1, //
0,0,5, 0,9,0, 0,0,1, 0, 0, 0, 0, 0, 2, 0, 7, 3, //
0,0,0, 0,0,2, 0,7,3, 7, 6, 0, 0, 0, 8, 2, 0, 0, //
7,6,0, 0,0,8, 2,0,0,
0,1,2, 0,0,9, 0,0,4, 0, 1, 2, 0, 0, 9, 0, 0, 4, //
0,0,0, 2,0,3, 0,0,0, 0, 0, 0, 2, 0, 3, 0, 0, 0, //
3,0,0, 1,0,0, 9,6,0, 3, 0, 0, 1, 0, 0, 9, 6, 0, //
0,0,1, 9,0,0, 0,5,8, 0, 0, 1, 9, 0, 0, 0, 5, 8, //
9,7,0, 5,0,0, 0,0,0, 9, 7, 0, 5, 0, 0, 0, 0, 0, //
5,0,0, 0,3,0, 7,0,0); 5, 0, 0, 0, 3, 0, 7, 0, 0);
// Do BP // Do BP
sudoku.runArcConsistency(4,10,PRINT); sudoku.runArcConsistency(4, 10, PRINT);
// sudoku.printSolution(); // don't do it // sudoku.printSolution(); // don't do it
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( Sudoku, extreme) TEST_UNSAFE(Sudoku, extreme) {
{ Sudoku sudoku(9, //
Sudoku sudoku(9, 0, 0, 9, 7, 4, 8, 0, 0, 0, 7, //
0,0,9, 7,4,8, 0,0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, //
7,0,0, 0,0,0, 0,0,0, 0, 1, 0, 9, 0, 0, 0, 0, 0, 7, //
0,2,0, 1,0,9, 0,0,0, 0, 0, 0, 2, 4, 0, 0, 6, 4, 0, //
1, 0, 5, 9, 0, 0, 9, 8, 0, 0, //
0,0,7, 0,0,0, 2,4,0, 0, 3, 0, 0, 0, 0, 0, 8, 0, 3, //
0,6,4, 0,1,0, 5,9,0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, //
0,9,8, 0,0,0, 3,0,0, 0, 6, 0, 0, 0, 2, 7, 5, 9, 0, 0);
0,0,0, 8,0,3, 0,2,0,
0,0,0, 0,0,0, 0,0,6,
0,0,0, 2,7,5, 9,0,0);
// Do BP // Do BP
sudoku.runArcConsistency(9,10,PRINT); sudoku.runArcConsistency(9, 10, PRINT);
#ifdef METIS #ifdef METIS
VariableIndexOrdered index(sudoku); VariableIndexOrdered index(sudoku);
@ -185,29 +171,28 @@ TEST_UNSAFE( Sudoku, extreme)
index.outputMetisFormat(os); index.outputMetisFormat(os);
#endif #endif
//sudoku.printSolution(); // don't do it // sudoku.printSolution(); // don't do it
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST_UNSAFE( Sudoku, AJC_3star_Feb8_2012) TEST_UNSAFE(Sudoku, AJC_3star_Feb8_2012) {
{ Sudoku sudoku(9, //
Sudoku sudoku(9, 9, 5, 0, 0, 0, 6, 0, 0, 0, //
9,5,0, 0,0,6, 0,0,0, 0, 8, 4, 0, 7, 0, 0, 0, 0, //
0,8,4, 0,7,0, 0,0,0, 6, 2, 0, 5, 0, 0, 4, 0, 0, //
6,2,0, 5,0,0, 4,0,0,
0,0,0, 2,9,0, 6,0,0, 0, 0, 0, 2, 9, 0, 6, 0, 0, //
0,9,0, 0,0,0, 0,2,0, 0, 9, 0, 0, 0, 0, 0, 2, 0, //
0,0,2, 0,6,3, 0,0,0, 0, 0, 2, 0, 6, 3, 0, 0, 0, //
0,0,9, 0,0,7, 0,6,8, 0, 0, 9, 0, 0, 7, 0, 6, 8, //
0,0,0, 0,3,0, 2,9,0, 0, 0, 0, 0, 3, 0, 2, 9, 0, //
0,0,0, 1,0,0, 0,3,7); 0, 0, 0, 1, 0, 0, 0, 3, 7);
// Do BP // Do BP
sudoku.runArcConsistency(9,10,PRINT); sudoku.runArcConsistency(9, 10, PRINT);
//sudoku.printSolution(); // don't do it // sudoku.printSolution(); // don't do it
} }
/* ************************************************************************* */ /* ************************************************************************* */
@ -216,4 +201,3 @@ int main() {
return TestRegistry::runAllTests(tr); return TestRegistry::runAllTests(tr);
} }
/* ************************************************************************* */ /* ************************************************************************* */


@ -226,7 +226,7 @@ pair<QP, QP> testParser(QPSParser parser) {
expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0 expected.inequalities.add(X1, -I_1x1, 0, 2); // x >= 0
expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0 expected.inequalities.add(X2, -I_1x1, 0, 3); // y > = 0
return {expected, exampleqp}; return {expected, exampleqp};
}; }
TEST(QPSolver, ParserSyntaticTest) { TEST(QPSolver, ParserSyntaticTest) {
auto result = testParser(QPSParser("QPExample.QPS")); auto result = testParser(QPSParser("QPExample.QPS"));


@ -56,7 +56,8 @@ private:
bool flag_bump_up_near_zero_probs_; bool flag_bump_up_near_zero_probs_;
/** concept check by type */ /** concept check by type */
GTSAM_CONCEPT_LIE_TYPE(T)GTSAM_CONCEPT_TESTABLE_TYPE(T) GTSAM_CONCEPT_LIE_TYPE(T)
GTSAM_CONCEPT_TESTABLE_TYPE(T)
public: public:


@ -0,0 +1,40 @@
# SLAM Factors
## SmartFactors
These are "structure-less" factors, i.e., rather than introducing a new variable for an observed 3D point or landmark, a single factor is created that provides a multi-view constraint on several poses and/or cameras.
### SmartRangeFactor
An experiment in creating a structure-less 2D range-SLAM factor with range-only measurements.
It uses a sophisticated `triangulate` logic based on circle intersections.
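The circle-intersection step can be illustrated with a small standalone helper (a sketch only, not the factor's actual `triangulate` code; `intersectCircles` is a hypothetical name):

```cpp
#include <gtsam/geometry/Point2.h>

#include <algorithm>
#include <cmath>
#include <utility>

using gtsam::Point2;

// Intersect two range circles (centers c1, c2 with radii r1, r2). Range-only
// SLAM yields two candidate landmark positions per pair of poses; a third
// range measurement (or a prior) is needed to pick the right one. Degenerate
// cases (concentric or non-intersecting circles) are not handled here.
std::pair<Point2, Point2> intersectCircles(const Point2& c1, double r1,
                                           const Point2& c2, double r2) {
  const double d = (c2 - c1).norm();                         // center distance
  const double a = (r1 * r1 - r2 * r2 + d * d) / (2.0 * d);  // along-axis offset
  const double h = std::sqrt(std::max(0.0, r1 * r1 - a * a));
  const Point2 u = (c2 - c1) / d;  // unit vector from c1 to c2
  const Point2 m = c1 + a * u;     // foot of the perpendicular
  const Point2 n(-u.y(), u.x());   // unit normal
  return std::make_pair(Point2(m + h * n), Point2(m - h * n));
}
```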
### SmartStereoProjectionFactor
Version of `SmartProjectionFactor` for stereo observations; it specializes `SmartFactorBase` for `CAMERA == StereoCamera`.
TODO: there is a lot of commented-out code, and much of it could move to a .cpp file.
### SmartStereoProjectionPoseFactor
Derives from `SmartStereoProjectionFactor` but adds an array of `Cal3_S2Stereo` calibration objects.
TODO: Again, since there are no template arguments, we could move a lot to the .cpp file.
### SmartStereoProjectionFactorPP
Similar to `SmartStereoProjectionPoseFactor`, but *additionally* adds an array of body_P_cam poses. The dimensions seem to be hardcoded, and the types defined in `SmartFactorBase` have been re-defined.
The body_P_cam poses are optimized here!
TODO: See above, same issues as `SmartStereoProjectionPoseFactor`.
### SmartProjectionPoseFactorRollingShutter
Is templated on a `CAMERA` type and derives from `SmartProjectionFactor`.
This factor optimizes two consecutive poses of a body assuming a rolling
shutter model of the camera with a given readout time. The factor requires that
values contain (for each 2D observation) the two consecutive poses from
which the pose at the time of the 2D observation can be interpolated.
TODO: the dimensions seem to be hardcoded and the types defined in the SmartFactorBase have been re-defined. Also, possibly a lot of copy/paste computation of things that (should) happen in base class.
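For intuition, here is a minimal sketch of the pose model the factor uses (mirroring the `interpolate`/`compose` calls in the implementation; the helper name is illustrative):

```cpp
#include <gtsam/base/Lie.h>  // gtsam::interpolate
#include <gtsam/geometry/Pose3.h>

using gtsam::Pose3;

// Pose used to project one measurement: interpolate between the two
// consecutive body poses (alpha in [0,1]), then apply the fixed
// body-to-camera extrinsics.
Pose3 interpolatedCameraPose(const Pose3& w_P_body1, const Pose3& w_P_body2,
                             double alpha, const Pose3& body_P_cam) {
  const Pose3 w_P_body = gtsam::interpolate<Pose3>(w_P_body1, w_P_body2, alpha);
  return w_P_body.compose(body_P_cam);
}
```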


@ -43,13 +43,12 @@ namespace gtsam {
template <class CAMERA> template <class CAMERA>
class SmartProjectionPoseFactorRollingShutter class SmartProjectionPoseFactorRollingShutter
: public SmartProjectionFactor<CAMERA> { : public SmartProjectionFactor<CAMERA> {
public: private:
typedef SmartProjectionFactor<CAMERA> Base;
typedef SmartProjectionPoseFactorRollingShutter<CAMERA> This;
typedef typename CAMERA::CalibrationType CALIBRATION; typedef typename CAMERA::CalibrationType CALIBRATION;
protected: protected:
/// shared pointer to calibration object (one for each observation)
std::vector<boost::shared_ptr<CALIBRATION>> K_all_;
/// The keys of the pose of the body (with respect to an external world /// The keys of the pose of the body (with respect to an external world
/// frame): two consecutive poses for each observation /// frame): two consecutive poses for each observation
std::vector<std::pair<Key, Key>> world_P_body_key_pairs_; std::vector<std::pair<Key, Key>> world_P_body_key_pairs_;
@ -58,17 +57,19 @@ class SmartProjectionPoseFactorRollingShutter
/// pair of consecutive poses /// pair of consecutive poses
std::vector<double> alphas_; std::vector<double> alphas_;
/// Pose of the camera in the body frame /// one or more cameras taking observations (fixed poses wrt body + fixed
std::vector<Pose3> body_P_sensors_; /// intrinsics)
boost::shared_ptr<typename Base::Cameras> cameraRig_;
/// vector of camera Ids (one for each observation, in the same order),
/// identifying which camera took the measurement
FastVector<size_t> cameraIds_;
public: public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW EIGEN_MAKE_ALIGNED_OPERATOR_NEW
/// shorthand for base class type typedef CAMERA Camera;
typedef SmartProjectionFactor<PinholePose<CALIBRATION>> Base; typedef CameraSet<CAMERA> Cameras;
/// shorthand for this class
typedef SmartProjectionPoseFactorRollingShutter This;
/// shorthand for a smart pointer to a factor /// shorthand for a smart pointer to a factor
typedef boost::shared_ptr<This> shared_ptr; typedef boost::shared_ptr<This> shared_ptr;
@ -83,22 +84,37 @@ class SmartProjectionPoseFactorRollingShutter
typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD>> typedef std::vector<MatrixZD, Eigen::aligned_allocator<MatrixZD>>
FBlocks; // vector of F blocks FBlocks; // vector of F blocks
/// Default constructor, only for serialization
SmartProjectionPoseFactorRollingShutter() {}
/** /**
* Constructor * Constructor
* @param Isotropic measurement noise * @param Isotropic measurement noise
* @param cameraRig set of cameras (fixed poses wrt body and intrinsics)
* taking the measurements
* @param params internal parameters of the smart factors * @param params internal parameters of the smart factors
*/ */
SmartProjectionPoseFactorRollingShutter( SmartProjectionPoseFactorRollingShutter(
const SharedNoiseModel& sharedNoiseModel, const SharedNoiseModel& sharedNoiseModel,
const boost::shared_ptr<Cameras>& cameraRig,
const SmartProjectionParams& params = SmartProjectionParams()) const SmartProjectionParams& params = SmartProjectionParams())
: Base(sharedNoiseModel, params) {} : Base(sharedNoiseModel, params), cameraRig_(cameraRig) {
// throw exception if configuration is not supported by this factor
if (Base::params_.degeneracyMode != gtsam::ZERO_ON_DEGENERACY)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"degeneracyMode must be set to ZERO_ON_DEGENERACY");
if (Base::params_.linearizationMode != gtsam::HESSIAN)
throw std::runtime_error(
"SmartProjectionRigFactor: "
"linearizationMode must be set to HESSIAN");
}
/** Virtual destructor */ /** Virtual destructor */
~SmartProjectionPoseFactorRollingShutter() override = default; ~SmartProjectionPoseFactorRollingShutter() override = default;
/** /**
* add a new measurement, with 2 pose keys, interpolation factor, camera * add a new measurement, with 2 pose keys, interpolation factor, and cameraId
* (intrinsic and extrinsic) calibration, and observed pixel.
* @param measured 2-dimensional location of the projection of a single * @param measured 2-dimensional location of the projection of a single
* landmark in a single view (the measurement), interpolated from the 2 poses * landmark in a single view (the measurement), interpolated from the 2 poses
* @param world_P_body_key1 key corresponding to the first body poses (time <= * @param world_P_body_key1 key corresponding to the first body poses (time <=
@ -107,13 +123,11 @@ class SmartProjectionPoseFactorRollingShutter
* >= time pixel is acquired) * >= time pixel is acquired)
* @param alpha interpolation factor in [0,1], such that if alpha = 0 the * @param alpha interpolation factor in [0,1], such that if alpha = 0 the
* interpolated pose is the same as world_P_body_key1 * interpolated pose is the same as world_P_body_key1
* @param K (fixed) camera intrinsic calibration * @param cameraId ID of the camera taking the measurement (default 0)
* @param body_P_sensor (fixed) camera extrinsic calibration
*/ */
void add(const Point2& measured, const Key& world_P_body_key1, void add(const Point2& measured, const Key& world_P_body_key1,
const Key& world_P_body_key2, const double& alpha, const Key& world_P_body_key2, const double& alpha,
const boost::shared_ptr<CALIBRATION>& K, const size_t& cameraId = 0) {
const Pose3& body_P_sensor = Pose3::identity()) {
// store measurements in base class // store measurements in base class
this->measured_.push_back(measured); this->measured_.push_back(measured);
@ -133,11 +147,8 @@ class SmartProjectionPoseFactorRollingShutter
// store interpolation factor // store interpolation factor
alphas_.push_back(alpha); alphas_.push_back(alpha);
// store fixed intrinsic calibration // store id of the camera taking the measurement
K_all_.push_back(K); cameraIds_.push_back(cameraId);
// store fixed extrinsics of the camera
body_P_sensors_.push_back(body_P_sensor);
} }
/** /**
@ -150,56 +161,36 @@ class SmartProjectionPoseFactorRollingShutter
* for the i0-th measurement can be interpolated * for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each * @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order) * measurement (in the same order)
* @param Ks vector of (fixed) intrinsic calibration objects * @param cameraIds IDs of the cameras taking each measurement (same order as
* @param body_P_sensors vector of (fixed) extrinsic calibration objects * the measurements)
*/ */
void add(const Point2Vector& measurements, void add(const Point2Vector& measurements,
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs, const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas, const std::vector<double>& alphas,
const std::vector<boost::shared_ptr<CALIBRATION>>& Ks, const FastVector<size_t>& cameraIds = FastVector<size_t>()) {
const std::vector<Pose3>& body_P_sensors) { if (world_P_body_key_pairs.size() != measurements.size() ||
assert(world_P_body_key_pairs.size() == measurements.size()); world_P_body_key_pairs.size() != alphas.size() ||
assert(world_P_body_key_pairs.size() == alphas.size()); (world_P_body_key_pairs.size() != cameraIds.size() &&
assert(world_P_body_key_pairs.size() == Ks.size()); cameraIds.size() != 0)) { // cameraIds.size()=0 is default
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"trying to add inconsistent inputs");
}
if (cameraIds.size() == 0 && cameraRig_->size() > 1) {
throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: "
"camera rig includes multiple camera "
"but add did not input cameraIds");
}
for (size_t i = 0; i < measurements.size(); i++) { for (size_t i = 0; i < measurements.size(); i++) {
add(measurements[i], world_P_body_key_pairs[i].first, add(measurements[i], world_P_body_key_pairs[i].first,
world_P_body_key_pairs[i].second, alphas[i], Ks[i], world_P_body_key_pairs[i].second, alphas[i],
body_P_sensors[i]); cameraIds.size() == 0 ? 0
: cameraIds[i]); // use 0 as default if
// cameraIds was not specified
} }
} }
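  // Example calls (illustrative; `measurements`, `key_pairs`, `alphas` as in
  // the unit tests): with a single-camera rig the cameraIds argument can be
  // omitted and camera 0 is used for every measurement,
  //   factor->add(measurements, key_pairs, alphas);
  // while with a multi-camera rig one id is given per measurement,
  //   factor->add(measurements, key_pairs, alphas, {0, 1, 2});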
/**
* Variant of the previous "add" function in which we include multiple
* measurements with the same (intrinsic and extrinsic) calibration
* @param measurements vector of the 2m dimensional location of the projection
* of a single landmark in the m views (the measurements)
* @param world_P_body_key_pairs vector where the i-th element contains a pair
* of keys corresponding to the pair of poses from which the observation pose
* for the i0-th measurement can be interpolated
* @param alphas vector of interpolation params (in [0,1]), one for each
* measurement (in the same order)
* @param K (fixed) camera intrinsic calibration (same for all measurements)
* @param body_P_sensor (fixed) camera extrinsic calibration (same for all
* measurements)
*/
void add(const Point2Vector& measurements,
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs,
const std::vector<double>& alphas,
const boost::shared_ptr<CALIBRATION>& K,
const Pose3& body_P_sensor = Pose3::identity()) {
assert(world_P_body_key_pairs.size() == measurements.size());
assert(world_P_body_key_pairs.size() == alphas.size());
for (size_t i = 0; i < measurements.size(); i++) {
add(measurements[i], world_P_body_key_pairs[i].first,
world_P_body_key_pairs[i].second, alphas[i], K, body_P_sensor);
}
}
/// return the calibration object
const std::vector<boost::shared_ptr<CALIBRATION>>& calibration() const {
return K_all_;
}
/// return (for each observation) the keys of the pair of poses from which we /// return (for each observation) the keys of the pair of poses from which we
/// interpolate /// interpolate
const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs() const { const std::vector<std::pair<Key, Key>>& world_P_body_key_pairs() const {
@ -209,8 +200,11 @@ class SmartProjectionPoseFactorRollingShutter
/// return the interpolation factors alphas /// return the interpolation factors alphas
const std::vector<double>& alphas() const { return alphas_; } const std::vector<double>& alphas() const { return alphas_; }
/// return the extrinsic camera calibration body_P_sensors /// return the calibration object
const std::vector<Pose3>& body_P_sensors() const { return body_P_sensors_; } const boost::shared_ptr<Cameras>& cameraRig() const { return cameraRig_; }
/// return the calibration object
const FastVector<size_t>& cameraIds() const { return cameraIds_; }
/** /**
* print * print
@ -221,15 +215,15 @@ class SmartProjectionPoseFactorRollingShutter
const std::string& s = "", const std::string& s = "",
const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override { const KeyFormatter& keyFormatter = DefaultKeyFormatter) const override {
std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n "; std::cout << s << "SmartProjectionPoseFactorRollingShutter: \n ";
for (size_t i = 0; i < K_all_.size(); i++) { for (size_t i = 0; i < cameraIds_.size(); i++) {
std::cout << "-- Measurement nr " << i << std::endl; std::cout << "-- Measurement nr " << i << std::endl;
std::cout << " pose1 key: " std::cout << " pose1 key: "
<< keyFormatter(world_P_body_key_pairs_[i].first) << std::endl; << keyFormatter(world_P_body_key_pairs_[i].first) << std::endl;
std::cout << " pose2 key: " std::cout << " pose2 key: "
<< keyFormatter(world_P_body_key_pairs_[i].second) << std::endl; << keyFormatter(world_P_body_key_pairs_[i].second) << std::endl;
std::cout << " alpha: " << alphas_[i] << std::endl; std::cout << " alpha: " << alphas_[i] << std::endl;
body_P_sensors_[i].print("extrinsic calibration:\n"); std::cout << "cameraId: " << cameraIds_[i] << std::endl;
K_all_[i]->print("intrinsic calibration = "); (*cameraRig_)[cameraIds_[i]].print("camera in rig:\n");
} }
Base::print("", keyFormatter); Base::print("", keyFormatter);
} }
@ -257,20 +251,48 @@ class SmartProjectionPoseFactorRollingShutter
keyPairsEqual = false; keyPairsEqual = false;
} }
double extrinsicCalibrationEqual = true; return e && Base::equals(p, tol) && alphas_ == e->alphas() &&
if (this->body_P_sensors_.size() == e->body_P_sensors().size()) { keyPairsEqual && cameraRig_->equals(*(e->cameraRig())) &&
for (size_t i = 0; i < this->body_P_sensors_.size(); i++) { std::equal(cameraIds_.begin(), cameraIds_.end(),
if (!body_P_sensors_[i].equals(e->body_P_sensors()[i])) { e->cameraIds().begin());
extrinsicCalibrationEqual = false;
break;
}
}
} else {
extrinsicCalibrationEqual = false;
} }
return e && Base::equals(p, tol) && K_all_ == e->calibration() && /**
alphas_ == e->alphas() && keyPairsEqual && extrinsicCalibrationEqual; * Collect all cameras involved in this factor
* @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor
* @return Cameras
*/
typename Base::Cameras cameras(const Values& values) const override {
typename Base::Cameras cameras;
for (size_t i = 0; i < this->measured_.size();
i++) { // for each measurement
const Pose3& w_P_body1 =
values.at<Pose3>(world_P_body_key_pairs_[i].first);
const Pose3& w_P_body2 =
values.at<Pose3>(world_P_body_key_pairs_[i].second);
double interpolationFactor = alphas_[i];
const Pose3& w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor);
const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
const Pose3& body_P_cam = camera_i.pose();
const Pose3& w_P_cam = w_P_body.compose(body_P_cam);
cameras.emplace_back(w_P_cam,
make_shared<typename CAMERA::CalibrationType>(
camera_i.calibration()));
}
return cameras;
}
/**
* error calculates the error of the factor.
*/
double error(const Values& values) const override {
if (this->active(values)) {
return this->totalReprojectionError(this->cameras(values));
} else { // else of active flag
return 0.0;
}
} }
/** /**
@ -305,9 +327,10 @@ class SmartProjectionPoseFactorRollingShutter
auto w_P_body = auto w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor, interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor,
dInterpPose_dPoseBody1, dInterpPose_dPoseBody2); dInterpPose_dPoseBody1, dInterpPose_dPoseBody2);
auto body_P_cam = body_P_sensors_[i]; const typename Base::Camera& camera_i = (*cameraRig_)[cameraIds_[i]];
auto body_P_cam = camera_i.pose();
auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose); auto w_P_cam = w_P_body.compose(body_P_cam, dPoseCam_dInterpPose);
PinholeCamera<CALIBRATION> camera(w_P_cam, *K_all_[i]); PinholeCamera<CALIBRATION> camera(w_P_cam, camera_i.calibration());
// get jacobians and error vector for current measurement // get jacobians and error vector for current measurement
Point2 reprojectionError_i = Point2 reprojectionError_i =
@ -332,7 +355,7 @@ class SmartProjectionPoseFactorRollingShutter
/// linearize and return a Hessianfactor that is an approximation of error(p) /// linearize and return a Hessianfactor that is an approximation of error(p)
boost::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor( boost::shared_ptr<RegularHessianFactor<DimPose>> createHessianFactor(
const Values& values, const double lambda = 0.0, const Values& values, const double& lambda = 0.0,
bool diagonalDamping = false) const { bool diagonalDamping = false) const {
// we may have multiple observation sharing the same keys (due to the // we may have multiple observation sharing the same keys (due to the
// rolling shutter interpolation), hence the number of unique keys may be // rolling shutter interpolation), hence the number of unique keys may be
@ -341,19 +364,21 @@ class SmartProjectionPoseFactorRollingShutter
this->keys_ this->keys_
.size(); // note: by construction, keys_ only contains unique keys .size(); // note: by construction, keys_ only contains unique keys
typename Base::Cameras cameras = this->cameras(values);
// Create structures for Hessian Factors // Create structures for Hessian Factors
KeyVector js; KeyVector js;
std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2); std::vector<Matrix> Gs(nrUniqueKeys * (nrUniqueKeys + 1) / 2);
std::vector<Vector> gs(nrUniqueKeys); std::vector<Vector> gs(nrUniqueKeys);
if (this->measured_.size() != if (this->measured_.size() !=
this->cameras(values).size()) // 1 observation per interpolated camera cameras.size()) // 1 observation per interpolated camera
throw std::runtime_error( throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: " "SmartProjectionPoseFactorRollingShutter: "
"measured_.size() inconsistent with input"); "measured_.size() inconsistent with input");
// triangulate 3D point at given linearization point // triangulate 3D point at given linearization point
this->triangulateSafe(this->cameras(values)); this->triangulateSafe(cameras);
if (!this->result_) { // failed: return "empty/zero" Hessian if (!this->result_) { // failed: return "empty/zero" Hessian
if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) { if (this->params_.degeneracyMode == ZERO_ON_DEGENERACY) {
@ -399,46 +424,6 @@ class SmartProjectionPoseFactorRollingShutter
this->keys_, augmentedHessianUniqueKeys); this->keys_, augmentedHessianUniqueKeys);
} }
/**
* error calculates the error of the factor.
*/
double error(const Values& values) const override {
if (this->active(values)) {
return this->totalReprojectionError(this->cameras(values));
} else { // else of active flag
return 0.0;
}
}
/**
* Collect all cameras involved in this factor
* @param values Values structure which must contain camera poses
* corresponding to keys involved in this factor
* @return Cameras
*/
typename Base::Cameras cameras(const Values& values) const override {
size_t numViews = this->measured_.size();
assert(numViews == K_all_.size());
assert(numViews == alphas_.size());
assert(numViews == body_P_sensors_.size());
assert(numViews == world_P_body_key_pairs_.size());
typename Base::Cameras cameras;
for (size_t i = 0; i < numViews; i++) { // for each measurement
const Pose3& w_P_body1 =
values.at<Pose3>(world_P_body_key_pairs_[i].first);
const Pose3& w_P_body2 =
values.at<Pose3>(world_P_body_key_pairs_[i].second);
double interpolationFactor = alphas_[i];
const Pose3& w_P_body =
interpolate<Pose3>(w_P_body1, w_P_body2, interpolationFactor);
const Pose3& body_P_cam = body_P_sensors_[i];
const Pose3& w_P_cam = w_P_body.compose(body_P_cam);
cameras.emplace_back(w_P_cam, K_all_[i]);
}
return cameras;
}
/** /**
* Linearize to Gaussian Factor (possibly adding a damping factor Lambda for * Linearize to Gaussian Factor (possibly adding a damping factor Lambda for
* LM) * LM)
@ -447,7 +432,7 @@ class SmartProjectionPoseFactorRollingShutter
* @return a Gaussian factor * @return a Gaussian factor
*/ */
boost::shared_ptr<GaussianFactor> linearizeDamped( boost::shared_ptr<GaussianFactor> linearizeDamped(
const Values& values, const double lambda = 0.0) const { const Values& values, const double& lambda = 0.0) const {
// depending on flag set on construction we may linearize to different // depending on flag set on construction we may linearize to different
// linear factors // linear factors
switch (this->params_.linearizationMode) { switch (this->params_.linearizationMode) {
@ -455,8 +440,8 @@ class SmartProjectionPoseFactorRollingShutter
return this->createHessianFactor(values, lambda); return this->createHessianFactor(values, lambda);
default: default:
throw std::runtime_error( throw std::runtime_error(
"SmartProjectionPoseFactorRollingShutter: unknown linearization " "SmartProjectionPoseFactorRollingShutter: "
"mode"); "unknown linearization mode");
} }
} }
@ -472,7 +457,6 @@ class SmartProjectionPoseFactorRollingShutter
template <class ARCHIVE> template <class ARCHIVE>
void serialize(ARCHIVE& ar, const unsigned int /*version*/) { void serialize(ARCHIVE& ar, const unsigned int /*version*/) {
ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base); ar& BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
ar& BOOST_SERIALIZATION_NVP(K_all_);
} }
}; };
// end of class declaration // end of class declaration
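A minimal end-to-end sketch of the refactored API above, mirroring the unit tests further down (the noise model `model`, calibration `sharedK`, extrinsics `body_P_cam`, measurement, keys and `alpha` are placeholders):

  typedef PinholePose<Cal3_S2> Camera;
  typedef CameraSet<Camera> Cameras;
  boost::shared_ptr<Cameras> cameraRig(new Cameras());
  cameraRig->push_back(Camera(body_P_cam, sharedK));
  SmartProjectionParams params(gtsam::HESSIAN, gtsam::ZERO_ON_DEGENERACY);
  SmartProjectionPoseFactorRollingShutter<Camera> factor(model, cameraRig, params);
  factor.add(measurement, x1, x2, alpha, 0);  // cameraId 0: the only camera in the rig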


@ -11,7 +11,7 @@
/** /**
* @file SmartStereoProjectionFactor.h * @file SmartStereoProjectionFactor.h
* @brief Smart stereo factor on StereoCameras (pose + calibration) * @brief Smart stereo factor on StereoCameras (pose)
* @author Luca Carlone * @author Luca Carlone
* @author Zsolt Kira * @author Zsolt Kira
* @author Frank Dellaert * @author Frank Dellaert
@ -447,23 +447,23 @@ public:
} }
/** /**
* This corrects the Jacobians and error vector for the case in which the right pixel in the monocular camera is missing (nan) * This corrects the Jacobians and error vector for the case in which the
* right 2D measurement in the monocular camera is missing (nan).
*/ */
void correctForMissingMeasurements(const Cameras& cameras, Vector& ue, void correctForMissingMeasurements(
const Cameras& cameras, Vector& ue,
boost::optional<typename Cameras::FBlocks&> Fs = boost::none, boost::optional<typename Cameras::FBlocks&> Fs = boost::none,
boost::optional<Matrix&> E = boost::none) const override boost::optional<Matrix&> E = boost::none) const override {
{
// when using stereo cameras, some of the measurements might be missing: // when using stereo cameras, some of the measurements might be missing:
for(size_t i=0; i < cameras.size(); i++){ for (size_t i = 0; i < cameras.size(); i++) {
const StereoPoint2& z = measured_.at(i); const StereoPoint2& z = measured_.at(i);
if(std::isnan(z.uR())) // if the right pixel is invalid if (std::isnan(z.uR())) // if the right 2D measurement is invalid
{ {
if(Fs){ // delete influence of right point on jacobian Fs if (Fs) { // delete influence of right point on jacobian Fs
MatrixZD& Fi = Fs->at(i); MatrixZD& Fi = Fs->at(i);
for(size_t ii=0; ii<Dim; ii++) for (size_t ii = 0; ii < Dim; ii++) Fi(1, ii) = 0.0;
Fi(1,ii) = 0.0;
} }
if(E) // delete influence of right point on jacobian E if (E) // delete influence of right point on jacobian E
E->row(ZDim * i + 1) = Matrix::Zero(1, E->cols()); E->row(ZDim * i + 1) = Matrix::Zero(1, E->cols());
// set the corresponding entry of vector ue to zero // set the corresponding entry of vector ue to zero


@ -33,9 +33,11 @@ namespace gtsam {
*/ */
/** /**
* This factor optimizes the pose of the body as well as the extrinsic camera calibration (pose of camera wrt body). * This factor optimizes the pose of the body as well as the extrinsic camera
* Each camera may have its own extrinsic calibration or the same calibration can be shared by multiple cameras. * calibration (pose of camera wrt body). Each camera may have its own extrinsic
* This factor requires that values contain the involved poses and extrinsics (both are Pose3 variables). * calibration or the same calibration can be shared by multiple cameras. This
* factor requires that values contain the involved poses and extrinsics (both
* are Pose3 variables).
* @addtogroup SLAM * @addtogroup SLAM
*/ */
class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor { class SmartStereoProjectionFactorPP : public SmartStereoProjectionFactor {
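  // Note: the factor expects Values to hold both the body poses and the camera
  // extrinsics as Pose3 variables, e.g. (key names illustrative):
  //   values.insert(poseKey, world_P_body);        // body pose in world frame
  //   values.insert(extrinsicKey, body_P_camera);  // camera pose in body frame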


@ -61,10 +61,13 @@ static double interp_factor1 = 0.3;
static double interp_factor2 = 0.4; static double interp_factor2 = 0.4;
static double interp_factor3 = 0.5; static double interp_factor3 = 0.5;
static size_t cameraId1 = 0;
/* ************************************************************************* */ /* ************************************************************************* */
// default Cal3_S2 poses with rolling shutter effect // default Cal3_S2 poses with rolling shutter effect
namespace vanillaPoseRS { namespace vanillaPoseRS {
typedef PinholePose<Cal3_S2> Camera; typedef PinholePose<Cal3_S2> Camera;
typedef CameraSet<Camera> Cameras;
static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h)); static Cal3_S2::shared_ptr sharedK(new Cal3_S2(fov, w, h));
Pose3 interp_pose1 = interpolate<Pose3>(level_pose, pose_right, interp_factor1); Pose3 interp_pose1 = interpolate<Pose3>(level_pose, pose_right, interp_factor1);
Pose3 interp_pose2 = interpolate<Pose3>(pose_right, pose_above, interp_factor2); Pose3 interp_pose2 = interpolate<Pose3>(pose_right, pose_above, interp_factor2);
@ -72,6 +75,9 @@ Pose3 interp_pose3 = interpolate<Pose3>(pose_above, level_pose, interp_factor3);
Camera cam1(interp_pose1, sharedK); Camera cam1(interp_pose1, sharedK);
Camera cam2(interp_pose2, sharedK); Camera cam2(interp_pose2, sharedK);
Camera cam3(interp_pose3, sharedK); Camera cam3(interp_pose3, sharedK);
SmartProjectionParams params(
gtsam::HESSIAN,
gtsam::ZERO_ON_DEGENERACY); // only config that works with RS factors
} // namespace vanillaPoseRS } // namespace vanillaPoseRS
LevenbergMarquardtParams lmParams; LevenbergMarquardtParams lmParams;
@ -80,26 +86,35 @@ typedef SmartProjectionPoseFactorRollingShutter<PinholePose<Cal3_S2>>
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Constructor) { TEST(SmartProjectionPoseFactorRollingShutter, Constructor) {
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); using namespace vanillaPoseRS;
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) { TEST(SmartProjectionPoseFactorRollingShutter, Constructor2) {
SmartProjectionParams params; using namespace vanillaPoseRS;
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
params.setRankTolerance(rankTol); params.setRankTolerance(rankTol);
SmartFactorRS factor1(model, params); SmartFactorRS factor1(model, cameraRig, params);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, add) { TEST(SmartProjectionPoseFactorRollingShutter, add) {
using namespace vanillaPose; using namespace vanillaPoseRS;
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); boost::shared_ptr<Cameras> cameraRig(new Cameras());
factor1->add(measurement1, x1, x2, interp_factor, sharedK, body_P_sensor); cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement1, x1, x2, interp_factor);
} }
/* ************************************************************************* */ /* ************************************************************************* */
TEST(SmartProjectionPoseFactorRollingShutter, Equals) { TEST(SmartProjectionPoseFactorRollingShutter, Equals) {
using namespace vanillaPose; using namespace vanillaPoseRS;
// create fake measurements // create fake measurements
Point2Vector measurements; Point2Vector measurements;
@ -112,68 +127,88 @@ TEST(SmartProjectionPoseFactorRollingShutter, Equals) {
key_pairs.push_back(std::make_pair(x2, x3)); key_pairs.push_back(std::make_pair(x2, x3));
key_pairs.push_back(std::make_pair(x3, x4)); key_pairs.push_back(std::make_pair(x3, x4));
std::vector<boost::shared_ptr<Cal3_S2>> intrinsicCalibrations;
intrinsicCalibrations.push_back(sharedK);
intrinsicCalibrations.push_back(sharedK);
intrinsicCalibrations.push_back(sharedK);
std::vector<Pose3> extrinsicCalibrations;
extrinsicCalibrations.push_back(body_P_sensor);
extrinsicCalibrations.push_back(body_P_sensor);
extrinsicCalibrations.push_back(body_P_sensor);
std::vector<double> interp_factors; std::vector<double> interp_factors;
interp_factors.push_back(interp_factor1); interp_factors.push_back(interp_factor1);
interp_factors.push_back(interp_factor2); interp_factors.push_back(interp_factor2);
interp_factors.push_back(interp_factor3); interp_factors.push_back(interp_factor3);
FastVector<size_t> cameraIds{0, 0, 0};
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(body_P_sensor, sharedK));
// create by adding a batch of measurements with a bunch of calibrations // create by adding a batch of measurements with a bunch of calibrations
SmartFactorRS::shared_ptr factor2(new SmartFactorRS(model)); SmartFactorRS::shared_ptr factor2(
factor2->add(measurements, key_pairs, interp_factors, intrinsicCalibrations, new SmartFactorRS(model, cameraRig, params));
extrinsicCalibrations); factor2->add(measurements, key_pairs, interp_factors, cameraIds);
// create by adding a batch of measurements with a single calibrations // create by adding a batch of measurements with a single calibrations
SmartFactorRS::shared_ptr factor3(new SmartFactorRS(model)); SmartFactorRS::shared_ptr factor3(
factor3->add(measurements, key_pairs, interp_factors, sharedK, body_P_sensor); new SmartFactorRS(model, cameraRig, params));
factor3->add(measurements, key_pairs, interp_factors, cameraIds);
{ // create equal factors and show equal returns true { // create equal factors and show equal returns true
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); SmartFactorRS::shared_ptr factor1(
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement2, x2, x3, interp_factor2, sharedK, body_P_sensor); factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); factor1->add(measurement2, x2, x3, interp_factor2, cameraId1);
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3));
}
{ // create equal factors and show equal returns true (use default cameraId)
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement1, x1, x2, interp_factor1);
factor1->add(measurement2, x2, x3, interp_factor2);
factor1->add(measurement3, x3, x4, interp_factor3);
EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3));
}
{ // create equal factors and show equal returns true (use default cameraId)
SmartFactorRS::shared_ptr factor1(
new SmartFactorRS(model, cameraRig, params));
factor1->add(measurements, key_pairs, interp_factors);
EXPECT(factor1->equals(*factor2)); EXPECT(factor1->equals(*factor2));
EXPECT(factor1->equals(*factor3)); EXPECT(factor1->equals(*factor3));
} }
{ // create slightly different factors (different keys) and show equal { // create slightly different factors (different keys) and show equal
// returns false // returns false (use default cameraIds)
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); SmartFactorRS::shared_ptr factor1(
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement2, x2, x2, interp_factor2, sharedK, factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
body_P_sensor); // different! factor1->add(measurement2, x2, x2, interp_factor2,
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); cameraId1); // different!
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor2));
EXPECT(!factor1->equals(*factor3)); EXPECT(!factor1->equals(*factor3));
} }
{ // create slightly different factors (different extrinsics) and show equal { // create slightly different factors (different extrinsics) and show equal
// returns false // returns false
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); boost::shared_ptr<Cameras> cameraRig2(new Cameras());
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); cameraRig2->push_back(Camera(body_P_sensor * body_P_sensor, sharedK));
factor1->add(measurement2, x2, x3, interp_factor2, sharedK, SmartFactorRS::shared_ptr factor1(
body_P_sensor * body_P_sensor); // different! new SmartFactorRS(model, cameraRig2, params));
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
factor1->add(measurement2, x2, x3, interp_factor2,
cameraId1); // different!
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor2));
EXPECT(!factor1->equals(*factor3)); EXPECT(!factor1->equals(*factor3));
} }
{ // create slightly different factors (different interp factors) and show { // create slightly different factors (different interp factors) and show
// equal returns false // equal returns false
SmartFactorRS::shared_ptr factor1(new SmartFactorRS(model)); SmartFactorRS::shared_ptr factor1(
factor1->add(measurement1, x1, x2, interp_factor1, sharedK, body_P_sensor); new SmartFactorRS(model, cameraRig, params));
factor1->add(measurement2, x2, x3, interp_factor1, sharedK, factor1->add(measurement1, x1, x2, interp_factor1, cameraId1);
body_P_sensor); // different! factor1->add(measurement2, x2, x3, interp_factor1,
factor1->add(measurement3, x3, x4, interp_factor3, sharedK, body_P_sensor); cameraId1); // different!
factor1->add(measurement3, x3, x4, interp_factor3, cameraId1);
EXPECT(!factor1->equals(*factor2)); EXPECT(!factor1->equals(*factor2));
EXPECT(!factor1->equals(*factor3)); EXPECT(!factor1->equals(*factor3));
@ -197,9 +232,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noiselessErrorAndJacobians) {
Point2 level_uv_right = cam2.project(landmark1); Point2 level_uv_right = cam2.project(landmark1);
Pose3 body_P_sensorId = Pose3::identity(); Pose3 body_P_sensorId = Pose3::identity();
SmartFactorRS factor(model); boost::shared_ptr<Cameras> cameraRig(new Cameras());
factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorId); cameraRig->push_back(Camera(body_P_sensorId, sharedK));
factor.add(level_uv_right, x2, x3, interp_factor2, sharedK, body_P_sensorId);
SmartFactorRS factor(model, cameraRig, params);
factor.add(level_uv, x1, x2, interp_factor1);
factor.add(level_uv_right, x2, x3, interp_factor2);
Values values; // it's a pose factor, hence these are poses Values values; // it's a pose factor, hence these are poses
values.insert(x1, level_pose); values.insert(x1, level_pose);
@ -272,10 +310,12 @@ TEST(SmartProjectionPoseFactorRollingShutter, noisyErrorAndJacobians) {
Point2 level_uv_right = cam2.project(landmark1); Point2 level_uv_right = cam2.project(landmark1);
Pose3 body_P_sensorNonId = body_P_sensor; Pose3 body_P_sensorNonId = body_P_sensor;
SmartFactorRS factor(model); boost::shared_ptr<Cameras> cameraRig(new Cameras());
factor.add(level_uv, x1, x2, interp_factor1, sharedK, body_P_sensorNonId); cameraRig->push_back(Camera(body_P_sensorNonId, sharedK));
factor.add(level_uv_right, x2, x3, interp_factor2, sharedK,
body_P_sensorNonId); SmartFactorRS factor(model, cameraRig, params);
factor.add(level_uv, x1, x2, interp_factor1);
factor.add(level_uv_right, x2, x3, interp_factor2);
Values values; // it's a pose factor, hence these are poses Values values; // it's a pose factor, hence these are poses
values.insert(x1, level_pose); values.insert(x1, level_pose);
@ -364,14 +404,20 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) {
interp_factors.push_back(interp_factor2); interp_factors.push_back(interp_factor2);
interp_factors.push_back(interp_factor3); interp_factors.push_back(interp_factor3);
SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model)); boost::shared_ptr<Cameras> cameraRig(new Cameras());
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK); cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model)); SmartFactorRS::shared_ptr smartFactor1(
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK); new SmartFactorRS(model, cameraRig, params));
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model)); SmartFactorRS::shared_ptr smartFactor2(
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK); new SmartFactorRS(model, cameraRig, params));
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
SmartFactorRS::shared_ptr smartFactor3(
new SmartFactorRS(model, cameraRig, params));
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10); const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
@ -411,6 +457,170 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses) {
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-6)); EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-6));
} }
/* *************************************************************************/
TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam) {
using namespace vanillaPoseRS;
Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3;
// Project three landmarks into three cameras
projectToMultipleCameras(cam1, cam2, cam3, landmark1, measurements_lmk1);
projectToMultipleCameras(cam1, cam2, cam3, landmark2, measurements_lmk2);
projectToMultipleCameras(cam1, cam2, cam3, landmark3, measurements_lmk3);
// create inputs
std::vector<std::pair<Key, Key>> key_pairs;
key_pairs.push_back(std::make_pair(x1, x2));
key_pairs.push_back(std::make_pair(x2, x3));
key_pairs.push_back(std::make_pair(x3, x1));
std::vector<double> interp_factors;
interp_factors.push_back(interp_factor1);
interp_factors.push_back(interp_factor2);
interp_factors.push_back(interp_factor3);
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(body_P_sensor, sharedK));
cameraRig->push_back(Camera(Pose3::identity(), sharedK));
SmartFactorRS::shared_ptr smartFactor1(
new SmartFactorRS(model, cameraRig, params));
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {1, 1, 1});
SmartFactorRS::shared_ptr smartFactor2(
new SmartFactorRS(model, cameraRig, params));
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {1, 1, 1});
SmartFactorRS::shared_ptr smartFactor3(
new SmartFactorRS(model, cameraRig, params));
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {1, 1, 1});
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
NonlinearFactorGraph graph;
graph.push_back(smartFactor1);
graph.push_back(smartFactor2);
graph.push_back(smartFactor3);
graph.addPrior(x1, level_pose, noisePrior);
graph.addPrior(x2, pose_right, noisePrior);
Values groundTruth;
groundTruth.insert(x1, level_pose);
groundTruth.insert(x2, pose_right);
groundTruth.insert(x3, pose_above); // pose above is the pose of the camera
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
// Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10),
// Point3(0.5,0.1,0.3)); // noise from regular projection factor test below
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
Point3(0.1, 0.1, 0.1)); // smaller noise
Values values;
values.insert(x1, level_pose);
values.insert(x2, pose_right);
// initialize third pose with some noise, we expect it to move back to
// original pose_above
values.insert(x3, pose_above * noise_pose);
EXPECT( // check that the pose is actually noisy
assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656,
-0.0313952598, -0.000986635786, 0.0314107591,
-0.999013364, -0.0313952598),
Point3(0.1, -0.1, 1.9)),
values.at<Pose3>(x3)));
Values result;
LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
result = optimizer.optimize();
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-6));
}
/* *************************************************************************/
TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_multiCam2) {
using namespace vanillaPoseRS;
Point2Vector measurements_lmk1, measurements_lmk2, measurements_lmk3;
// create arbitrary body_T_sensor (transforms from sensor to body)
Pose3 body_T_sensor1 = Pose3(Rot3::Ypr(-0.03, 0., 0.01), Point3(1, 1, 1));
Pose3 body_T_sensor2 = Pose3(Rot3::Ypr(-0.1, 0., 0.05), Point3(0, 0, 1));
Pose3 body_T_sensor3 = Pose3(Rot3::Ypr(-0.3, 0., -0.05), Point3(0, 1, 1));
Camera camera1(interp_pose1 * body_T_sensor1, sharedK);
Camera camera2(interp_pose2 * body_T_sensor2, sharedK);
Camera camera3(interp_pose3 * body_T_sensor3, sharedK);
// Project three landmarks into three cameras
projectToMultipleCameras(camera1, camera2, camera3, landmark1,
measurements_lmk1);
projectToMultipleCameras(camera1, camera2, camera3, landmark2,
measurements_lmk2);
projectToMultipleCameras(camera1, camera2, camera3, landmark3,
measurements_lmk3);
// create inputs
std::vector<std::pair<Key, Key>> key_pairs;
key_pairs.push_back(std::make_pair(x1, x2));
key_pairs.push_back(std::make_pair(x2, x3));
key_pairs.push_back(std::make_pair(x3, x1));
std::vector<double> interp_factors;
interp_factors.push_back(interp_factor1);
interp_factors.push_back(interp_factor2);
interp_factors.push_back(interp_factor3);
boost::shared_ptr<Cameras> cameraRig(new Cameras());
cameraRig->push_back(Camera(body_T_sensor1, sharedK));
cameraRig->push_back(Camera(body_T_sensor2, sharedK));
cameraRig->push_back(Camera(body_T_sensor3, sharedK));
SmartFactorRS::shared_ptr smartFactor1(
new SmartFactorRS(model, cameraRig, params));
smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, {0, 1, 2});
SmartFactorRS::shared_ptr smartFactor2(
new SmartFactorRS(model, cameraRig, params));
smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, {0, 1, 2});
SmartFactorRS::shared_ptr smartFactor3(
new SmartFactorRS(model, cameraRig, params));
smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, {0, 1, 2});
const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
NonlinearFactorGraph graph;
graph.push_back(smartFactor1);
graph.push_back(smartFactor2);
graph.push_back(smartFactor3);
graph.addPrior(x1, level_pose, noisePrior);
graph.addPrior(x2, pose_right, noisePrior);
Values groundTruth;
groundTruth.insert(x1, level_pose);
groundTruth.insert(x2, pose_right);
groundTruth.insert(x3, pose_above); // pose above is the pose of the camera
DOUBLES_EQUAL(0, graph.error(groundTruth), 1e-9);
// Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI/10, 0., -M_PI/10),
// Point3(0.5,0.1,0.3)); // noise from regular projection factor test below
Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
Point3(0.1, 0.1, 0.1)); // smaller noise
Values values;
values.insert(x1, level_pose);
values.insert(x2, pose_right);
// initialize third pose with some noise, we expect it to move back to
// original pose_above
values.insert(x3, pose_above * noise_pose);
EXPECT( // check that the pose is actually noisy
assert_equal(Pose3(Rot3(0, -0.0314107591, 0.99950656, -0.99950656,
-0.0313952598, -0.000986635786, 0.0314107591,
-0.999013364, -0.0313952598),
Point3(0.1, -0.1, 1.9)),
values.at<Pose3>(x3)));
Values result;
LevenbergMarquardtOptimizer optimizer(graph, values, lmParams);
result = optimizer.optimize();
EXPECT(assert_equal(pose_above, result.at<Pose3>(x3), 1e-4));
}
 /* *************************************************************************/
 TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
   // here we replicate a test in SmartProjectionPoseFactor by setting
@@ -418,7 +628,7 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
   // falls back to standard pixel measurements) Note: this is a quite extreme
   // test since in typical camera you would not have more than 1 measurement per
   // landmark at each interpolated pose
-  using namespace vanillaPose;
+  using namespace vanillaPoseRS;

   // Default cameras for simple derivatives
   static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0));
@@ -438,15 +648,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, hessian_simple_2poses) {
   measurements_lmk1.push_back(cam1.project(landmark1));
   measurements_lmk1.push_back(cam2.project(landmark1));

-  SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
-  double interp_factor = 0;  // equivalent to measurement taken at pose 1
-  smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor, sharedKSimple,
-                    body_P_sensorId);
-  interp_factor = 1;  // equivalent to measurement taken at pose 2
-  smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor, sharedKSimple,
-                    body_P_sensorId);
-
-  SmartFactor::Cameras cameras;
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple));
+
+  SmartFactorRS::shared_ptr smartFactor1(
+      new SmartFactorRS(model, cameraRig, params));
+  double interp_factor = 0;  // equivalent to measurement taken at pose 1
+  smartFactor1->add(measurements_lmk1[0], x1, x2, interp_factor);
+  interp_factor = 1;  // equivalent to measurement taken at pose 2
+  smartFactor1->add(measurements_lmk1[1], x1, x2, interp_factor);
+
+  SmartFactorRS::Cameras cameras;
   cameras.push_back(cam1);
   cameras.push_back(cam2);
@@ -534,14 +746,17 @@ TEST(SmartProjectionPoseFactorRollingShutter, optimization_3poses_EPI) {
   params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist);
   params.setEnableEPI(true);

-  SmartFactorRS smartFactor1(model, params);
-  smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS smartFactor2(model, params);
-  smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS smartFactor3(model, params);
-  smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK);
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS smartFactor1(model, cameraRig, params);
+  smartFactor1.add(measurements_lmk1, key_pairs, interp_factors);
+
+  SmartFactorRS smartFactor2(model, cameraRig, params);
+  smartFactor2.add(measurements_lmk2, key_pairs, interp_factors);
+
+  SmartFactorRS smartFactor3(model, cameraRig, params);
+  smartFactor3.add(measurements_lmk3, key_pairs, interp_factors);

   const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
@@ -594,18 +809,23 @@ TEST(SmartProjectionPoseFactorRollingShutter,
   SmartProjectionParams params;
   params.setRankTolerance(1.0);
   params.setLinearizationMode(gtsam::HESSIAN);
-  params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY);
+  // params.setDegeneracyMode(gtsam::IGNORE_DEGENERACY); // this would give an
+  // exception as expected
+  params.setDegeneracyMode(gtsam::ZERO_ON_DEGENERACY);
   params.setLandmarkDistanceThreshold(excludeLandmarksFutherThanDist);
   params.setEnableEPI(false);

-  SmartFactorRS smartFactor1(model, params);
-  smartFactor1.add(measurements_lmk1, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS smartFactor2(model, params);
-  smartFactor2.add(measurements_lmk2, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS smartFactor3(model, params);
-  smartFactor3.add(measurements_lmk3, key_pairs, interp_factors, sharedK);
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS smartFactor1(model, cameraRig, params);
+  smartFactor1.add(measurements_lmk1, key_pairs, interp_factors);
+
+  SmartFactorRS smartFactor2(model, cameraRig, params);
+  smartFactor2.add(measurements_lmk2, key_pairs, interp_factors);
+
+  SmartFactorRS smartFactor3(model, cameraRig, params);
+  smartFactor3.add(measurements_lmk3, key_pairs, interp_factors);

   const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
@@ -673,17 +893,24 @@ TEST(SmartProjectionPoseFactorRollingShutter,
   params.setDynamicOutlierRejectionThreshold(dynamicOutlierRejectionThreshold);
   params.setEnableEPI(false);

-  SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model, params));
-  smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model, params));
-  smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model, params));
-  smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK);
-
-  SmartFactorRS::shared_ptr smartFactor4(new SmartFactorRS(model, params));
-  smartFactor4->add(measurements_lmk4, key_pairs, interp_factors, sharedK);
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS::shared_ptr smartFactor1(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);
+
+  SmartFactorRS::shared_ptr smartFactor2(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);
+
+  SmartFactorRS::shared_ptr smartFactor3(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);
+
+  SmartFactorRS::shared_ptr smartFactor4(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor4->add(measurements_lmk4, key_pairs, interp_factors);

   const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
@@ -733,8 +960,12 @@ TEST(SmartProjectionPoseFactorRollingShutter,
   interp_factors.push_back(interp_factor2);
   interp_factors.push_back(interp_factor3);

-  SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
-  smartFactor1->add(measurements_lmk1, key_pairs, interp_factors, sharedK);
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS::shared_ptr smartFactor1(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor1->add(measurements_lmk1, key_pairs, interp_factors);

   Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
                            Point3(0.1, 0.1, 0.1));  // smaller noise
@@ -870,9 +1101,12 @@ TEST(SmartProjectionPoseFactorRollingShutter,
   interp_factors.push_back(interp_factor3);
   interp_factors.push_back(interp_factor1);

-  SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
-  smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors,
-                    sharedK);
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS::shared_ptr smartFactor1(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor1->add(measurements_lmk1_redundant, key_pairs, interp_factors);

   Pose3 noise_pose = Pose3(Rot3::Ypr(-M_PI / 100, 0., -M_PI / 100),
                            Point3(0.1, 0.1, 0.1));  // smaller noise
@@ -1026,15 +1260,21 @@ TEST(SmartProjectionPoseFactorRollingShutter,
   interp_factors_redundant.push_back(
       interp_factors.at(0));  // we readd the first interp factor

-  SmartFactorRS::shared_ptr smartFactor1(new SmartFactorRS(model));
+  boost::shared_ptr<Cameras> cameraRig(new Cameras());
+  cameraRig->push_back(Camera(Pose3::identity(), sharedK));
+
+  SmartFactorRS::shared_ptr smartFactor1(
+      new SmartFactorRS(model, cameraRig, params));
   smartFactor1->add(measurements_lmk1_redundant, key_pairs_redundant,
-                    interp_factors_redundant, sharedK);
+                    interp_factors_redundant);

-  SmartFactorRS::shared_ptr smartFactor2(new SmartFactorRS(model));
-  smartFactor2->add(measurements_lmk2, key_pairs, interp_factors, sharedK);
+  SmartFactorRS::shared_ptr smartFactor2(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor2->add(measurements_lmk2, key_pairs, interp_factors);

-  SmartFactorRS::shared_ptr smartFactor3(new SmartFactorRS(model));
-  smartFactor3->add(measurements_lmk3, key_pairs, interp_factors, sharedK);
+  SmartFactorRS::shared_ptr smartFactor3(
+      new SmartFactorRS(model, cameraRig, params));
+  smartFactor3->add(measurements_lmk3, key_pairs, interp_factors);

   const SharedDiagonal noisePrior = noiseModel::Isotropic::Sigma(6, 0.10);
@@ -1076,16 +1316,20 @@ TEST(SmartProjectionPoseFactorRollingShutter,
 #ifndef DISABLE_TIMING
 #include <gtsam/base/timing.h>
-// -Total: 0 CPU (0 times, 0 wall, 0.04 children, min: 0 max: 0)
-//|   -SF RS LINEARIZE: 0.02 CPU (1000 times, 0.017244 wall, 0.02 children, min:
-// 0 max: 0)
-//|   -RS LINEARIZE: 0.02 CPU (1000 times, 0.009035 wall, 0.02 children, min: 0 max: 0)
+//-Total: 0 CPU (0 times, 0 wall, 0.21 children, min: 0 max: 0)
+//|   -SF RS LINEARIZE: 0.09 CPU
+// (10000 times, 0.124106 wall, 0.09 children, min: 0 max: 0)
+//|   -RS LINEARIZE: 0.09 CPU
+// (10000 times, 0.068719 wall, 0.09 children, min: 0 max: 0)
 /* *************************************************************************/
 TEST(SmartProjectionPoseFactorRollingShutter, timing) {
   using namespace vanillaPose;

   // Default cameras for simple derivatives
   static Cal3_S2::shared_ptr sharedKSimple(new Cal3_S2(100, 100, 0, 0, 0));
+  SmartProjectionParams params(
+      gtsam::HESSIAN,
+      gtsam::ZERO_ON_DEGENERACY);  // only config that works with RS factors

   Rot3 R = Rot3::identity();
   Pose3 pose1 = Pose3(R, Point3(0, 0, 0));
@@ -1102,16 +1346,18 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
   measurements_lmk1.push_back(cam1.project(landmark1));
   measurements_lmk1.push_back(cam2.project(landmark1));

-  size_t nrTests = 1000;
+  size_t nrTests = 10000;

   for (size_t i = 0; i < nrTests; i++) {
-    SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(model));
+    boost::shared_ptr<Cameras> cameraRig(new Cameras());
+    cameraRig->push_back(Camera(body_P_sensorId, sharedKSimple));
+
+    SmartFactorRS::shared_ptr smartFactorRS(new SmartFactorRS(
+        model, cameraRig, params));
     double interp_factor = 0;  // equivalent to measurement taken at pose 1
-    smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor,
-                       sharedKSimple, body_P_sensorId);
+    smartFactorRS->add(measurements_lmk1[0], x1, x2, interp_factor);
     interp_factor = 1;  // equivalent to measurement taken at pose 2
-    smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor,
-                       sharedKSimple, body_P_sensorId);
+    smartFactorRS->add(measurements_lmk1[1], x1, x2, interp_factor);

     Values values;
     values.insert(x1, pose1);
@@ -1122,7 +1368,8 @@ TEST(SmartProjectionPoseFactorRollingShutter, timing) {
   }

   for (size_t i = 0; i < nrTests; i++) {
-    SmartFactor::shared_ptr smartFactor(new SmartFactor(model, sharedKSimple));
+    SmartFactor::shared_ptr smartFactor(
+        new SmartFactor(model, sharedKSimple, params));
     smartFactor->add(measurements_lmk1[0], x1);
     smartFactor->add(measurements_lmk1[1], x2);


@@ -89,12 +89,19 @@ set_target_properties(${GTSAM_PYTHON_TARGET} PROPERTIES
     RELWITHDEBINFO_POSTFIX "" # Otherwise you will have a wrong name
 )

+# Set the path for the GTSAM python module
 set(GTSAM_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam)

-# Symlink all tests .py files to build folder.
+# Copy all python files to build folder.
 copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam"
                "${GTSAM_MODULE_PATH}")

+# Hack to get python test files copied every time they are modified
+file(GLOB GTSAM_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam/tests/*.py")
+foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
+  configure_file(${test_file} "${GTSAM_MODULE_PATH}/tests/${test_file}" COPYONLY)
+endforeach()
+
 # Common directory for data/datasets stored with the package.
 # This will store the data in the Python site package directly.
 file(COPY "${GTSAM_SOURCE_DIR}/examples/Data" DESTINATION "${GTSAM_MODULE_PATH}")
@@ -147,10 +154,16 @@ if(GTSAM_UNSTABLE_BUILD_PYTHON)
   set(GTSAM_UNSTABLE_MODULE_PATH ${GTSAM_PYTHON_BUILD_DIRECTORY}/gtsam_unstable)

-  # Symlink all tests .py files to build folder.
+  # Copy all python files to build folder.
   copy_directory("${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable"
                  "${GTSAM_UNSTABLE_MODULE_PATH}")

+  # Hack to get python test files copied every time they are modified
+  file(GLOB GTSAM_UNSTABLE_PYTHON_TEST_FILES "${CMAKE_CURRENT_SOURCE_DIR}/gtsam_unstable/tests/*.py")
+  foreach(test_file ${GTSAM_PYTHON_TEST_FILES})
+    configure_file(${test_file} "${GTSAM_UNSTABLE_MODULE_PATH}/tests/${test_file}" COPYONLY)
+  endforeach()
+
   # Add gtsam_unstable to the install target
   list(APPEND GTSAM_PYTHON_DEPENDENCIES ${GTSAM_PYTHON_UNSTABLE_TARGET})


@@ -10,31 +10,51 @@ A script validating and demonstrating the ImuFactor inference.

 Author: Frank Dellaert, Varun Agrawal
 """

-# pylint: disable=no-name-in-module,unused-import,arguments-differ
+# pylint: disable=no-name-in-module,unused-import,arguments-differ,import-error,wrong-import-order

 from __future__ import print_function

 import argparse
 import math

-import gtsam
 import matplotlib.pyplot as plt
 import numpy as np
-from mpl_toolkits.mplot3d import Axes3D

+import gtsam
 from gtsam.symbol_shorthand import B, V, X
 from gtsam.utils.plot import plot_pose3
+from mpl_toolkits.mplot3d import Axes3D
 from PreintegrationExample import POSES_FIG, PreintegrationExample

 BIAS_KEY = B(0)
+GRAVITY = 9.81

 np.set_printoptions(precision=3, suppress=True)


+def parse_args() -> argparse.Namespace:
+    """Parse command line arguments."""
+    parser = argparse.ArgumentParser("ImuFactorExample.py")
+    parser.add_argument("--twist_scenario",
+                        default="sick_twist",
+                        choices=("zero_twist", "forward_twist", "loop_twist",
+                                 "sick_twist"))
+    parser.add_argument("--time",
+                        "-T",
+                        default=12,
+                        type=int,
+                        help="Total navigation time in seconds")
+    parser.add_argument("--compute_covariances",
+                        default=False,
+                        action='store_true')
+    parser.add_argument("--verbose", default=False, action='store_true')
+    args = parser.parse_args()
+    return args
+
+
 class ImuFactorExample(PreintegrationExample):
     """Class to run example of the Imu Factor."""
-    def __init__(self, twist_scenario="sick_twist"):
+    def __init__(self, twist_scenario: str = "sick_twist"):
         self.velocity = np.array([2, 0, 0])
         self.priorNoise = gtsam.noiseModel.Isotropic.Sigma(6, 0.1)
         self.velNoise = gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
@@ -51,19 +71,30 @@ class ImuFactorExample(PreintegrationExample):
         gyroBias = np.array([0.1, 0.3, -0.1])
         bias = gtsam.imuBias.ConstantBias(accBias, gyroBias)

+        params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
+        # Some arbitrary noise sigmas
+        gyro_sigma = 1e-3
+        accel_sigma = 1e-3
+        I_3x3 = np.eye(3)
+        params.setGyroscopeCovariance(gyro_sigma**2 * I_3x3)
+        params.setAccelerometerCovariance(accel_sigma**2 * I_3x3)
+        params.setIntegrationCovariance(1e-7**2 * I_3x3)
+
         dt = 1e-2
         super(ImuFactorExample, self).__init__(twist_scenarios[twist_scenario],
-                                               bias, dt)
+                                               bias, params, dt)

-    def addPrior(self, i, graph):
-        """Add priors at time step `i`."""
+    def addPrior(self, i: int, graph: gtsam.NonlinearFactorGraph):
+        """Add a prior on the navigation state at time `i`."""
         state = self.scenario.navState(i)
         graph.push_back(
             gtsam.PriorFactorPose3(X(i), state.pose(), self.priorNoise))
         graph.push_back(
             gtsam.PriorFactorVector(V(i), state.velocity(), self.velNoise))

-    def optimize(self, graph, initial):
+    def optimize(self, graph: gtsam.NonlinearFactorGraph,
+                 initial: gtsam.Values):
         """Optimize using Levenberg-Marquardt optimization."""
         params = gtsam.LevenbergMarquardtParams()
         params.setVerbosityLM("SUMMARY")
@@ -71,24 +102,49 @@ class ImuFactorExample(PreintegrationExample):
         result = optimizer.optimize()
         return result

-    def plot(self, result):
-        """Plot resulting poses."""
+    def plot(self,
+             values: gtsam.Values,
+             title: str = "Estimated Trajectory",
+             fignum: int = POSES_FIG + 1,
+             show: bool = False):
+        """
+        Plot poses in values.
+
+        Args:
+            values: The values object with the poses to plot.
+            title: The title of the plot.
+            fignum: The matplotlib figure number.
+                POSES_FIG is a value from the PreintegrationExample which we
+                simply increment to generate a new figure.
+            show: Flag indicating whether to display the figure.
+        """
         i = 0
-        while result.exists(X(i)):
-            pose_i = result.atPose3(X(i))
-            plot_pose3(POSES_FIG + 1, pose_i, 1)
+        while values.exists(X(i)):
+            pose_i = values.atPose3(X(i))
+            plot_pose3(fignum, pose_i, 1)
             i += 1
-        plt.title("Estimated Trajectory")
+        plt.title(title)

-        gtsam.utils.plot.set_axes_equal(POSES_FIG + 1)
+        gtsam.utils.plot.set_axes_equal(fignum)

-        print("Bias Values", result.atConstantBias(BIAS_KEY))
+        print("Bias Values", values.atConstantBias(BIAS_KEY))

         plt.ioff()
-        plt.show()
+        if show:
+            plt.show()

-    def run(self, T=12, compute_covariances=False, verbose=True):
-        """Main runner."""
+    def run(self,
+            T: int = 12,
+            compute_covariances: bool = False,
+            verbose: bool = True):
+        """
+        Main runner.
+
+        Args:
+            T: Total trajectory time.
+            compute_covariances: Flag indicating whether to compute marginal covariances.
+            verbose: Flag indicating if printing should be verbose.
+        """
         graph = gtsam.NonlinearFactorGraph()

         # initialize data structure for pre-integrated IMU measurements
@@ -173,25 +229,11 @@ class ImuFactorExample(PreintegrationExample):
                 print("Covariance on vel {}:\n{}\n".format(
                     i, marginals.marginalCovariance(V(i))))

-        self.plot(result)
+        self.plot(result, show=True)


 if __name__ == '__main__':
-    parser = argparse.ArgumentParser("ImuFactorExample.py")
-    parser.add_argument("--twist_scenario",
-                        default="sick_twist",
-                        choices=("zero_twist", "forward_twist", "loop_twist",
-                                 "sick_twist"))
-    parser.add_argument("--time",
-                        "-T",
-                        default=12,
-                        type=int,
-                        help="Total time in seconds")
-    parser.add_argument("--compute_covariances",
-                        default=False,
-                        action='store_true')
-    parser.add_argument("--verbose", default=False, action='store_true')
-    args = parser.parse_args()
+    args = parse_args()

     ImuFactorExample(args.twist_scenario).run(args.time,
                                               args.compute_covariances,
                                               args.verbose)
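The constructor change above lets callers inject their own PreintegrationParams instead of relying on the class defaults. A minimal sketch of that pattern, assuming only the gtsam Python calls that already appear in this diff (MakeSharedU, the covariance setters, ConstantTwistScenario, ScenarioRunner); the sigma, bias, and twist values below are illustrative, not taken from the example:

import gtsam
import numpy as np

# Custom preintegration parameters (Z-up convention via MakeSharedU).
GRAVITY = 9.81
params = gtsam.PreintegrationParams.MakeSharedU(GRAVITY)
I_3x3 = np.eye(3)
params.setGyroscopeCovariance(1e-3**2 * I_3x3)
params.setAccelerometerCovariance(1e-3**2 * I_3x3)
params.setIntegrationCovariance(1e-7**2 * I_3x3)

# Constant-twist scenario and a runner that synthesizes IMU measurements.
W = np.array([0, -np.radians(30), 0])  # angular velocity (pitching up)
V = np.array([2, 0, 0])                # linear velocity
scenario = gtsam.ConstantTwistScenario(W, V)
bias = gtsam.imuBias.ConstantBias(np.array([-0.3, 0.1, 0.2]),
                                  np.array([0.1, 0.3, -0.1]))
dt = 1e-2
runner = gtsam.ScenarioRunner(scenario, params, dt, bias)

# One synthetic measurement pair at t = 0.5 s.
t = 0.5
print(runner.measuredAngularVelocity(t))
print(runner.measuredSpecificForce(t))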


@@ -0,0 +1,178 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Pose SLAM example using iSAM2 in the 2D plane.
Author: Jerred Chen, Yusuf Ali
Modeled after:
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""
import math
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
key: int):
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
# Print the current estimates computed using iSAM2.
print("*"*50 + f"\nInference after State {key+1}:\n")
print(current_estimate)
# Compute the marginals for all states in the graph.
marginals = gtsam.Marginals(graph, current_estimate)
# Plot the newly updated iSAM2 inference.
fig = plt.figure(0)
axes = fig.gca()
plt.cla()
i = 1
while current_estimate.exists(i):
gtsam_plot.plot_pose2(0, current_estimate.atPose2(i), 0.5, marginals.marginalCovariance(i))
i += 1
plt.axis('equal')
axes.set_xlim(-1, 5)
axes.set_ylim(-1, 3)
plt.pause(1)
def determine_loop_closure(odom: np.ndarray, current_estimate: gtsam.Values,
key: int, xy_tol=0.6, theta_tol=17) -> int:
"""Simple brute force approach which iterates through previous states
and checks for loop closure.
Args:
odom: Vector representing noisy odometry (x, y, theta) measurement in the body frame.
current_estimate: The current estimates computed by iSAM2.
key: Key corresponding to the current state estimate of the robot.
xy_tol: Optional argument for the x-y measurement tolerance, in meters.
theta_tol: Optional argument for the theta measurement tolerance, in degrees.
Returns:
k: The key of the state which is helping add the loop closure constraint.
If loop closure is not found, then None is returned.
"""
if current_estimate:
prev_est = current_estimate.atPose2(key+1)
rotated_odom = prev_est.rotation().matrix() @ odom[:2]
curr_xy = np.array([prev_est.x() + rotated_odom[0],
prev_est.y() + rotated_odom[1]])
curr_theta = prev_est.theta() + odom[2]
for k in range(1, key+1):
pose_xy = np.array([current_estimate.atPose2(k).x(),
current_estimate.atPose2(k).y()])
pose_theta = current_estimate.atPose2(k).theta()
if (abs(pose_xy - curr_xy) <= xy_tol).all() and \
(abs(pose_theta - curr_theta) <= theta_tol*np.pi/180):
return k
def Pose2SLAM_ISAM2_example():
"""Perform 2D SLAM given the ground truth changes in pose as well as
simple loop closure detection."""
plt.ion()
# Declare the 2D translational standard deviations of the prior factor's Gaussian model, in meters.
prior_xy_sigma = 0.3
# Declare the 2D rotational standard deviation of the prior factor's Gaussian model, in degrees.
prior_theta_sigma = 5
# Declare the 2D translational standard deviations of the odometry factor's Gaussian model, in meters.
odometry_xy_sigma = 0.2
# Declare the 2D rotational standard deviation of the odometry factor's Gaussian model, in degrees.
odometry_theta_sigma = 5
# Although this example only uses linear measurements and Gaussian noise models, it is important
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
# simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_xy_sigma,
prior_xy_sigma,
prior_theta_sigma*np.pi/180]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_xy_sigma,
odometry_xy_sigma,
odometry_theta_sigma*np.pi/180]))
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
graph = gtsam.NonlinearFactorGraph()
initial_estimate = gtsam.Values()
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
# update calls are required to perform the relinearization.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)
# Create the ground truth odometry measurements of the robot during the trajectory.
true_odometry = [(2, 0, 0),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2),
(2, 0, math.pi/2)]
# Corrupt the odometry measurements with gaussian noise to create noisy odometry measurements.
odometry_measurements = [np.random.multivariate_normal(true_odom, ODOMETRY_NOISE.covariance())
for true_odom in true_odometry]
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
# iSAM2 incremental optimization.
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
initial_estimate.insert(1, gtsam.Pose2(0.5, 0.0, 0.2))
# Initialize the current estimate which is used during the incremental inference loop.
current_estimate = initial_estimate
for i in range(len(true_odometry)):
# Obtain the noisy odometry that is received by the robot and corrupted by gaussian noise.
noisy_odom_x, noisy_odom_y, noisy_odom_theta = odometry_measurements[i]
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
loop = determine_loop_closure(odometry_measurements[i], current_estimate, i, xy_tol=0.8, theta_tol=25)
# Add a binary factor in between two existing states if loop closure is detected.
# Otherwise, add a binary factor between a newly observed state and the previous state.
if loop:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, loop,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
else:
graph.push_back(gtsam.BetweenFactorPose2(i + 1, i + 2,
gtsam.Pose2(noisy_odom_x, noisy_odom_y, noisy_odom_theta), ODOMETRY_NOISE))
# Compute and insert the initialization estimate for the current pose using the noisy odometry measurement.
computed_estimate = current_estimate.atPose2(i + 1).compose(gtsam.Pose2(noisy_odom_x,
noisy_odom_y,
noisy_odom_theta))
initial_estimate.insert(i + 2, computed_estimate)
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
isam.update(graph, initial_estimate)
current_estimate = isam.calculateEstimate()
# Report all current state estimates from the iSAM2 optimization.
report_on_progress(graph, current_estimate, i)
initial_estimate.clear()
# Print the final covariance matrix for each pose after completing inference on the trajectory.
marginals = gtsam.Marginals(graph, current_estimate)
i = 1
for i in range(1, len(true_odometry)+1):
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
plt.ioff()
plt.show()
if __name__ == "__main__":
Pose2SLAM_ISAM2_example()
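The example above interleaves plotting and loop-closure search with the estimation; the incremental-inference core is much smaller. A stripped-down sketch of just that skeleton, assuming the same gtsam Python API used above (ISAM2Params, ISAM2.update / calculateEstimate) and noise-free odometry for brevity; it is not a replacement for the full example:

import math

import numpy as np

import gtsam

# Noise models on (x, y, theta); sigmas mirror the example above.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.3, 0.3, 5 * np.pi / 180]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([0.2, 0.2, 5 * np.pi / 180]))

# iSAM2 with the same relinearization settings as the example.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)

# Prior on the first pose anchors the trajectory.
graph = gtsam.NonlinearFactorGraph()
initial = gtsam.Values()
graph.push_back(gtsam.PriorFactorPose2(1, gtsam.Pose2(0, 0, 0), PRIOR_NOISE))
initial.insert(1, gtsam.Pose2(0, 0, 0))
isam.update(graph, initial)
current = isam.calculateEstimate()

# Feed odometry one step at a time; only the *new* factors and values go into update().
odometry = [gtsam.Pose2(2, 0, 0), gtsam.Pose2(2, 0, math.pi / 2)]
for k, delta in enumerate(odometry, start=1):
    graph = gtsam.NonlinearFactorGraph()
    initial = gtsam.Values()
    graph.push_back(gtsam.BetweenFactorPose2(k, k + 1, delta, ODOMETRY_NOISE))
    initial.insert(k + 1, current.atPose2(k).compose(delta))
    isam.update(graph, initial)
    current = isam.calculateEstimate()

last = current.atPose2(len(odometry) + 1)
print(last.x(), last.y(), last.theta())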


@@ -0,0 +1,208 @@
"""
GTSAM Copyright 2010-2018, Georgia Tech Research Corporation,
Atlanta, Georgia 30332-0415
All Rights Reserved
Authors: Frank Dellaert, et al. (see THANKS for the full author list)
See LICENSE for the license information
Pose SLAM example using iSAM2 in 3D space.
Author: Jerred Chen
Modeled after:
- VisualISAM2Example by: Duy-Nguyen Ta (C++), Frank Dellaert (Python)
- Pose2SLAMExample by: Alex Cunningham (C++), Kevin Deng & Frank Dellaert (Python)
"""
from typing import List
import matplotlib.pyplot as plt
import numpy as np
import gtsam
import gtsam.utils.plot as gtsam_plot
def report_on_progress(graph: gtsam.NonlinearFactorGraph, current_estimate: gtsam.Values,
key: int):
"""Print and plot incremental progress of the robot for 2D Pose SLAM using iSAM2."""
# Print the current estimates computed using iSAM2.
print("*"*50 + f"\nInference after State {key+1}:\n")
print(current_estimate)
# Compute the marginals for all states in the graph.
marginals = gtsam.Marginals(graph, current_estimate)
# Plot the newly updated iSAM2 inference.
fig = plt.figure(0)
axes = fig.gca(projection='3d')
plt.cla()
i = 1
while current_estimate.exists(i):
gtsam_plot.plot_pose3(0, current_estimate.atPose3(i), 10,
marginals.marginalCovariance(i))
i += 1
axes.set_xlim3d(-30, 45)
axes.set_ylim3d(-30, 45)
axes.set_zlim3d(-30, 45)
plt.pause(1)
def create_poses() -> List[gtsam.Pose3]:
"""Creates ground truth poses of the robot."""
P0 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
P1 = np.array([[0, -1, 0, 15],
[1, 0, 0, 15],
[0, 0, 1, 20],
[0, 0, 0, 1]])
P2 = np.array([[np.cos(np.pi/4), 0, np.sin(np.pi/4), 30],
[0, 1, 0, 30],
[-np.sin(np.pi/4), 0, np.cos(np.pi/4), 30],
[0, 0, 0, 1]])
P3 = np.array([[0, 1, 0, 30],
[0, 0, -1, 0],
[-1, 0, 0, -15],
[0, 0, 0, 1]])
P4 = np.array([[-1, 0, 0, 0],
[0, -1, 0, -10],
[0, 0, 1, -10],
[0, 0, 0, 1]])
P5 = P0[:]
return [gtsam.Pose3(P0), gtsam.Pose3(P1), gtsam.Pose3(P2),
gtsam.Pose3(P3), gtsam.Pose3(P4), gtsam.Pose3(P5)]
def determine_loop_closure(odom_tf: gtsam.Pose3, current_estimate: gtsam.Values,
key: int, xyz_tol=0.6, rot_tol=17) -> int:
"""Simple brute force approach which iterates through previous states
and checks for loop closure.
Args:
odom_tf: The noisy odometry transformation measurement in the body frame.
current_estimate: The current estimates computed by iSAM2.
key: Key corresponding to the current state estimate of the robot.
xyz_tol: Optional argument for the translational tolerance, in meters.
rot_tol: Optional argument for the rotational tolerance, in degrees.
Returns:
k: The key of the state which is helping add the loop closure constraint.
If loop closure is not found, then None is returned.
"""
if current_estimate:
prev_est = current_estimate.atPose3(key+1)
curr_est = prev_est.compose(odom_tf)
for k in range(1, key+1):
pose = current_estimate.atPose3(k)
if (abs(pose.matrix()[:3,:3] - curr_est.matrix()[:3,:3]) <= rot_tol*np.pi/180).all() and \
(abs(pose.matrix()[:3,3] - curr_est.matrix()[:3,3]) <= xyz_tol).all():
return k
def Pose3_ISAM2_example():
"""Perform 3D SLAM given ground truth poses as well as simple
loop closure detection."""
plt.ion()
# Declare the 3D translational standard deviations of the prior factor's Gaussian model, in meters.
prior_xyz_sigma = 0.3
# Declare the 3D rotational standard deviations of the prior factor's Gaussian model, in degrees.
prior_rpy_sigma = 5
# Declare the 3D translational standard deviations of the odometry factor's Gaussian model, in meters.
odometry_xyz_sigma = 0.2
# Declare the 3D rotational standard deviations of the odometry factor's Gaussian model, in degrees.
odometry_rpy_sigma = 5
# Although this example only uses linear measurements and Gaussian noise models, it is important
# to note that iSAM2 can be utilized to its full potential during nonlinear optimization. This example
# simply showcases how iSAM2 may be applied to a Pose2 SLAM problem.
PRIOR_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([prior_rpy_sigma*np.pi/180,
prior_rpy_sigma*np.pi/180,
prior_rpy_sigma*np.pi/180,
prior_xyz_sigma,
prior_xyz_sigma,
prior_xyz_sigma]))
ODOMETRY_NOISE = gtsam.noiseModel.Diagonal.Sigmas(np.array([odometry_rpy_sigma*np.pi/180,
odometry_rpy_sigma*np.pi/180,
odometry_rpy_sigma*np.pi/180,
odometry_xyz_sigma,
odometry_xyz_sigma,
odometry_xyz_sigma]))
# Create a Nonlinear factor graph as well as the data structure to hold state estimates.
graph = gtsam.NonlinearFactorGraph()
initial_estimate = gtsam.Values()
# Create iSAM2 parameters which can adjust the threshold necessary to force relinearization and how many
# update calls are required to perform the relinearization.
parameters = gtsam.ISAM2Params()
parameters.setRelinearizeThreshold(0.1)
parameters.setRelinearizeSkip(1)
isam = gtsam.ISAM2(parameters)
# Create the ground truth poses of the robot trajectory.
true_poses = create_poses()
# Create the ground truth odometry transformations, xyz translations, and roll-pitch-yaw rotations
# between each robot pose in the trajectory.
odometry_tf = [true_poses[i-1].transformPoseTo(true_poses[i]) for i in range(1, len(true_poses))]
odometry_xyz = [(odometry_tf[i].x(), odometry_tf[i].y(), odometry_tf[i].z()) for i in range(len(odometry_tf))]
odometry_rpy = [odometry_tf[i].rotation().rpy() for i in range(len(odometry_tf))]
# Corrupt xyz translations and roll-pitch-yaw rotations with gaussian noise to create noisy odometry measurements.
noisy_measurements = [np.random.multivariate_normal(np.hstack((odometry_rpy[i],odometry_xyz[i])), \
ODOMETRY_NOISE.covariance()) for i in range(len(odometry_tf))]
# Add the prior factor to the factor graph, and poorly initialize the prior pose to demonstrate
# iSAM2 incremental optimization.
graph.push_back(gtsam.PriorFactorPose3(1, true_poses[0], PRIOR_NOISE))
initial_estimate.insert(1, true_poses[0].compose(gtsam.Pose3(
gtsam.Rot3.Rodrigues(-0.1, 0.2, 0.25), gtsam.Point3(0.05, -0.10, 0.20))))
# Initialize the current estimate which is used during the incremental inference loop.
current_estimate = initial_estimate
for i in range(len(odometry_tf)):
# Obtain the noisy translation and rotation that is received by the robot and corrupted by gaussian noise.
noisy_odometry = noisy_measurements[i]
# Compute the noisy odometry transformation according to the xyz translation and roll-pitch-yaw rotation.
noisy_tf = gtsam.Pose3(gtsam.Rot3.RzRyRx(noisy_odometry[:3]), noisy_odometry[3:6].reshape(-1,1))
# Determine if there is loop closure based on the odometry measurement and the previous estimate of the state.
loop = determine_loop_closure(noisy_tf, current_estimate, i, xyz_tol=18, rot_tol=30)
# Add a binary factor in between two existing states if loop closure is detected.
# Otherwise, add a binary factor between a newly observed state and the previous state.
if loop:
graph.push_back(gtsam.BetweenFactorPose3(i + 1, loop, noisy_tf, ODOMETRY_NOISE))
else:
graph.push_back(gtsam.BetweenFactorPose3(i + 1, i + 2, noisy_tf, ODOMETRY_NOISE))
# Compute and insert the initialization estimate for the current pose using a noisy odometry measurement.
noisy_estimate = current_estimate.atPose3(i + 1).compose(noisy_tf)
initial_estimate.insert(i + 2, noisy_estimate)
# Perform incremental update to iSAM2's internal Bayes tree, optimizing only the affected variables.
isam.update(graph, initial_estimate)
current_estimate = isam.calculateEstimate()
# Report all current state estimates from the iSAM2 optimization.
report_on_progress(graph, current_estimate, i)
initial_estimate.clear()
# Print the final covariance matrix for each pose after completing inference.
marginals = gtsam.Marginals(graph, current_estimate)
i = 1
while current_estimate.exists(i):
print(f"X{i} covariance:\n{marginals.marginalCovariance(i)}\n")
i += 1
plt.ioff()
plt.show()
if __name__ == '__main__':
Pose3_ISAM2_example()
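determine_loop_closure above accepts a candidate when the rotation matrices and the translations agree element-wise within the given tolerances. A small self-contained sketch of that proximity test on two gtsam.Pose3 objects, using only the Pose3.matrix() accessor already used above; the helper name poses_are_close, the poses, and the tolerances are illustrative:

import numpy as np
import gtsam

def poses_are_close(pose_a: gtsam.Pose3, pose_b: gtsam.Pose3,
                    xyz_tol: float = 0.6, rot_tol_deg: float = 17) -> bool:
    """Element-wise proximity test on the homogeneous matrices, as in the example."""
    A, B = pose_a.matrix(), pose_b.matrix()
    rot_ok = (np.abs(A[:3, :3] - B[:3, :3]) <= rot_tol_deg * np.pi / 180).all()
    trans_ok = (np.abs(A[:3, 3] - B[:3, 3]) <= xyz_tol).all()
    return bool(rot_ok and trans_ok)

# Identity vs. a small rotation/translation offset.
p0 = gtsam.Pose3()
p1 = gtsam.Pose3(gtsam.Rot3.Rodrigues(0.01, -0.02, 0.015),
                 gtsam.Point3(0.1, -0.05, 0.2))
print(poses_are_close(p0, p1))                 # True for the default tolerances
print(poses_are_close(p0, p1, xyz_tol=0.01))   # False: translation differs by more than 1 cm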


@@ -5,10 +5,14 @@ All Rights Reserved

 See LICENSE for the license information

-A script validating the Preintegration of IMU measurements
+A script validating the Preintegration of IMU measurements.
+
+Authors: Frank Dellaert, Varun Agrawal.
 """

-import math
+# pylint: disable=invalid-name,unused-import,wrong-import-order
+
+from typing import Optional, Sequence

 import gtsam
 import matplotlib.pyplot as plt
@@ -18,25 +22,28 @@ from mpl_toolkits.mplot3d import Axes3D

 IMU_FIG = 1
 POSES_FIG = 2
+GRAVITY = 10


-class PreintegrationExample(object):
+class PreintegrationExample:
+    """Base class for all preintegration examples."""

     @staticmethod
-    def defaultParams(g):
+    def defaultParams(g: float):
         """Create default parameters with Z *up* and realistic noise parameters"""
         params = gtsam.PreintegrationParams.MakeSharedU(g)
-        kGyroSigma = math.radians(0.5) / 60  # 0.5 degree ARW
+        kGyroSigma = np.radians(0.5) / 60  # 0.5 degree ARW
         kAccelSigma = 0.1 / 60  # 10 cm VRW
-        params.setGyroscopeCovariance(
-            kGyroSigma ** 2 * np.identity(3, float))
-        params.setAccelerometerCovariance(
-            kAccelSigma ** 2 * np.identity(3, float))
-        params.setIntegrationCovariance(
-            0.0000001 ** 2 * np.identity(3, float))
+        params.setGyroscopeCovariance(kGyroSigma**2 * np.identity(3, float))
+        params.setAccelerometerCovariance(kAccelSigma**2 *
+                                          np.identity(3, float))
+        params.setIntegrationCovariance(0.0000001**2 * np.identity(3, float))
         return params

-    def __init__(self, twist=None, bias=None, dt=1e-2):
+    def __init__(self,
+                 twist: Optional[np.ndarray] = None,
+                 bias: Optional[gtsam.imuBias.ConstantBias] = None,
+                 params: Optional[gtsam.PreintegrationParams] = None,
+                 dt: float = 1e-2):
         """Initialize with given twist, a pair(angularVelocityVector, velocityVector)."""

         # setup interactive plotting
@@ -48,7 +55,7 @@ class PreintegrationExample(object):
         else:
             # default = loop with forward velocity 2m/s, while pitching up
             # with angular velocity 30 degree/sec (negative in FLU)
-            W = np.array([0, -math.radians(30), 0])
+            W = np.array([0, -np.radians(30), 0])
             V = np.array([2, 0, 0])
             self.scenario = gtsam.ConstantTwistScenario(W, V)
@@ -58,9 +65,11 @@ class PreintegrationExample(object):
         self.labels = list('xyz')
         self.colors = list('rgb')

-        # Create runner
-        self.g = 10  # simple gravity constant
-        self.params = self.defaultParams(self.g)
+        if params:
+            self.params = params
+        else:
+            # Default params with simple gravity constant
+            self.params = self.defaultParams(g=GRAVITY)

         if bias is not None:
             self.actualBias = bias
@@ -69,13 +78,22 @@ class PreintegrationExample(object):
             gyroBias = np.array([0, 0, 0])
             self.actualBias = gtsam.imuBias.ConstantBias(accBias, gyroBias)

-        self.runner = gtsam.ScenarioRunner(
-            self.scenario, self.params, self.dt, self.actualBias)
+        # Create runner
+        self.runner = gtsam.ScenarioRunner(self.scenario, self.params, self.dt,
+                                           self.actualBias)

         fig, self.axes = plt.subplots(4, 3)
         fig.set_tight_layout(True)

-    def plotImu(self, t, measuredOmega, measuredAcc):
+    def plotImu(self, t: float, measuredOmega: Sequence,
+                measuredAcc: Sequence):
+        """
+        Plot IMU measurements.
+
+        Args:
+            t: The time at which the measurement was recorded.
+            measuredOmega: Measured angular velocity.
+            measuredAcc: Measured linear acceleration.
+        """
         plt.figure(IMU_FIG)

         # plot angular velocity
@@ -108,12 +126,21 @@ class PreintegrationExample(object):
             ax.scatter(t, measuredAcc[i], color=color, marker='.')
             ax.set_xlabel('specific force ' + label)

-    def plotGroundTruthPose(self, t, scale=0.3, time_interval=0.01):
-        # plot ground truth pose, as well as prediction from integrated IMU measurements
+    def plotGroundTruthPose(self,
+                            t: float,
+                            scale: float = 0.3,
+                            time_interval: float = 0.01):
+        """
+        Plot ground truth pose, as well as prediction from integrated IMU measurements.
+
+        Args:
+            t: Time at which the pose was obtained.
+            scale: The scaling factor for the pose axes.
+            time_interval: The time to wait before showing the plot.
+        """
         actualPose = self.scenario.pose(t)
         plot_pose3(POSES_FIG, actualPose, scale)
-        t = actualPose.translation()
-        self.maxDim = max([max(np.abs(t)), self.maxDim])
+        translation = actualPose.translation()
+        self.maxDim = max([max(np.abs(translation)), self.maxDim])
         ax = plt.gca()
         ax.set_xlim3d(-self.maxDim, self.maxDim)
         ax.set_ylim3d(-self.maxDim, self.maxDim)
@@ -121,8 +148,8 @@ class PreintegrationExample(object):

         plt.pause(time_interval)

-    def run(self, T=12):
-        # simulate the loop
+    def run(self, T: int = 12):
+        """Simulate the loop."""
         for i, t in enumerate(np.arange(0, T, self.dt)):
             measuredOmega = self.runner.measuredAngularVelocity(t)
             measuredAcc = self.runner.measuredSpecificForce(t)


@@ -7,38 +7,48 @@
 See LICENSE for the license information

 Solve a structure-from-motion problem from a "Bundle Adjustment in the Large" file
-Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert)
+Author: Frank Dellaert (Python: Akshay Krishnan, John Lambert, Varun Agrawal)
 """

 import argparse
 import logging
 import sys

-import matplotlib.pyplot as plt
-import numpy as np
-
 import gtsam
-from gtsam import (
-    GeneralSFMFactorCal3Bundler,
-    PinholeCameraCal3Bundler,
-    PriorFactorPinholeCameraCal3Bundler,
-    readBal,
-    symbol_shorthand
-)
-
-C = symbol_shorthand.C
-P = symbol_shorthand.P
-
-logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+from gtsam import (GeneralSFMFactorCal3Bundler,
+                   PriorFactorPinholeCameraCal3Bundler, PriorFactorPoint3)
+from gtsam.symbol_shorthand import C, P  # type: ignore
+from gtsam.utils import plot  # type: ignore
+from matplotlib import pyplot as plt
+
+logging.basicConfig(stream=sys.stdout, level=logging.INFO)
+
+DEFAULT_BAL_DATASET = "dubrovnik-3-7-pre"
+
+
+def plot_scene(scene_data: gtsam.SfmData, result: gtsam.Values) -> None:
+    """Plot the SFM results."""
+    plot_vals = gtsam.Values()
+    for cam_idx in range(scene_data.number_cameras()):
+        plot_vals.insert(C(cam_idx),
+                         result.atPinholeCameraCal3Bundler(C(cam_idx)).pose())
+    for j in range(scene_data.number_tracks()):
+        plot_vals.insert(P(j), result.atPoint3(P(j)))
+
+    plot.plot_3d_points(0, plot_vals, linespec="g.")
+    plot.plot_trajectory(0, plot_vals, title="SFM results")
+
+    plt.show()


-def run(args):
+def run(args: argparse.Namespace) -> None:
     """ Run LM optimization with BAL input data and report resulting error """
-    input_file = gtsam.findExampleDataFile(args.input_file)
+    input_file = args.input_file

     # Load the SfM data from file
-    scene_data = readBal(input_file)
-    logging.info(f"read {scene_data.number_tracks()} tracks on {scene_data.number_cameras()} cameras\n")
+    scene_data = gtsam.readBal(input_file)
+    logging.info("read %d tracks on %d cameras\n", scene_data.number_tracks(),
+                 scene_data.number_cameras())

     # Create a factor graph
     graph = gtsam.NonlinearFactorGraph()
@@ -47,29 +57,25 @@ def run(args):
     noise = gtsam.noiseModel.Isotropic.Sigma(2, 1.0)  # one pixel in u and v

     # Add measurements to the factor graph
-    j = 0
-    for t_idx in range(scene_data.number_tracks()):
-        track = scene_data.track(t_idx)  # SfmTrack
+    for j in range(scene_data.number_tracks()):
+        track = scene_data.track(j)  # SfmTrack
         # retrieve the SfmMeasurement objects
         for m_idx in range(track.number_measurements()):
             # i represents the camera index, and uv is the 2d measurement
             i, uv = track.measurement(m_idx)
             # note use of shorthand symbols C and P
             graph.add(GeneralSFMFactorCal3Bundler(uv, noise, C(i), P(j)))
-        j += 1

     # Add a prior on pose x1. This indirectly specifies where the origin is.
     graph.push_back(
-        gtsam.PriorFactorPinholeCameraCal3Bundler(
-            C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)
-        )
-    )
+        PriorFactorPinholeCameraCal3Bundler(
+            C(0), scene_data.camera(0),
+            gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))
     # Also add a prior on the position of the first landmark to fix the scale
     graph.push_back(
-        gtsam.PriorFactorPoint3(
-            P(0), scene_data.track(0).point3(), gtsam.noiseModel.Isotropic.Sigma(3, 0.1)
-        )
-    )
+        PriorFactorPoint3(P(0),
+                          scene_data.track(0).point3(),
+                          gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))

     # Create initial estimate
     initial = gtsam.Values()
@@ -81,12 +87,10 @@ def run(args):
         initial.insert(C(i), camera)
         i += 1

-    j = 0
     # add each SfmTrack
-    for t_idx in range(scene_data.number_tracks()):
-        track = scene_data.track(t_idx)
+    for j in range(scene_data.number_tracks()):
+        track = scene_data.track(j)
         initial.insert(P(j), track.point3())
-        j += 1

     # Optimize the graph and print results
     try:
@@ -94,25 +98,31 @@ def run(args):
         params.setVerbosityLM("ERROR")
         lm = gtsam.LevenbergMarquardtOptimizer(graph, initial, params)
         result = lm.optimize()
-    except Exception as e:
+    except RuntimeError:
         logging.exception("LM Optimization failed")
         return

     # Error drops from ~2764.22 to ~0.046
-    logging.info(f"final error: {graph.error(result)}")
+    logging.info("initial error: %f", graph.error(initial))
+    logging.info("final error: %f", graph.error(result))
+
+    plot_scene(scene_data, result)
+
+
+def main() -> None:
+    """Main runner."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-i',
+                        '--input_file',
+                        type=str,
+                        default=gtsam.findExampleDataFile(DEFAULT_BAL_DATASET),
+                        help="""Read SFM data from the specified BAL file.
+        The data format is described here: https://grail.cs.washington.edu/projects/bal/.
+        BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples,
+        then (wx,wy,wz,tx,ty,tz,f,k1,k1) as Bundler camera calibrations w/ Rodrigues vector
+        and (x,y,z) 3d point initializations.""")
+    run(parser.parse_args())


 if __name__ == "__main__":
-    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        '-i',
-        '--input_file',
-        type=str,
-        default="dubrovnik-3-7-pre",
-        help='Read SFM data from the specified BAL file'
-        'The data format is described here: https://grail.cs.washington.edu/projects/bal/.'
-        'BAL files contain (nrPoses, nrPoints, nrObservations), followed by (i,j,u,v) tuples, '
-        'then (wx,wy,wz,tx,ty,tz,f,k1,k1) as Bundler camera calibrations w/ Rodrigues vector'
-        'and (x,y,z) 3d point initializations.'
-    )
-    run(parser.parse_args())
+    main()
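The two priors added in run() remove the gauge freedom of bundle adjustment: the camera prior pins down the origin and orientation, and the landmark prior pins down the scale. A minimal sketch of just that gauge-fixing step on the bundled dubrovnik-3-7-pre dataset, assuming only calls that appear in this diff (findExampleDataFile, readBal, the two prior factors, the C/P symbol shorthand):

import gtsam
from gtsam import PriorFactorPinholeCameraCal3Bundler, PriorFactorPoint3
from gtsam.symbol_shorthand import C, P

# Load the small BAL dataset shipped with GTSAM.
scene_data = gtsam.readBal(gtsam.findExampleDataFile("dubrovnik-3-7-pre"))
print(scene_data.number_cameras(), "cameras,", scene_data.number_tracks(), "tracks")

graph = gtsam.NonlinearFactorGraph()

# Prior on the first camera (pose + Cal3Bundler calibration, 9 dof) fixes the origin.
graph.push_back(
    PriorFactorPinholeCameraCal3Bundler(
        C(0), scene_data.camera(0), gtsam.noiseModel.Isotropic.Sigma(9, 0.1)))

# Prior on the first landmark fixes the overall scale.
graph.push_back(
    PriorFactorPoint3(P(0), scene_data.track(0).point3(),
                      gtsam.noiseModel.Isotropic.Sigma(3, 0.1)))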


@@ -59,8 +59,16 @@ class TestPose3(GtsamTestCase):
         self.assertEqual(math.sqrt(2.0), x1.range(pose=xl2))

     def test_adjoint(self):
-        """Test adjoint method."""
+        """Test adjoint methods."""
+        T = Pose3()
         xi = np.array([1, 2, 3, 4, 5, 6])
+        # test calling functions
+        T.AdjointMap()
+        T.Adjoint(xi)
+        T.AdjointTranspose(xi)
+        Pose3.adjointMap(xi)
+        Pose3.adjoint(xi, xi)
+
+        # test correctness of adjoint(x, y)
         expected = np.dot(Pose3.adjointMap_(xi), xi)
         actual = Pose3.adjoint_(xi, xi)
         np.testing.assert_array_equal(actual, expected)
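The new calls exercise the Pose3 adjoint API; the identities behind them are easy to check numerically. A short sketch, assuming only the methods the test itself calls (AdjointMap, Adjoint, adjointMap, adjoint); the pose and vectors are illustrative:

import numpy as np
from gtsam import Pose3, Rot3, Point3

xi = np.array([1., 2., 3., 4., 5., 6.])
T = Pose3(Rot3.Rodrigues(0.1, -0.2, 0.3), Point3(1, 2, 3))

# Adjoint(xi) is the 6x6 AdjointMap() applied to xi.
assert np.allclose(T.AdjointMap() @ xi, T.Adjoint(xi))

# adjoint(xi, y) is the little-adjoint matrix adjointMap(xi) applied to y.
y = np.array([6., 5., 4., 3., 2., 1.])
assert np.allclose(Pose3.adjointMap(xi) @ y, Pose3.adjoint(xi, y))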


@@ -606,7 +606,7 @@ Vector3 f(const Point2& a, const Vector3& b, OptionalJacobian<3, 2> H1,
   if (H1) *H1 << b.y(), b.z(), b.x(), 0, 0, 0;
   if (H2) *H2 = A;
   return A * b;
-};
+}
 }

 TEST(ExpressionFactor, MultiplyWithInverseFunction) {


@ -114,94 +114,94 @@ using symbol_shorthand::L;
/* Create GUIDs for Noisemodels */ /* Create GUIDs for Noisemodels */
/* ************************************************************************* */ /* ************************************************************************* */
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Constrained, "gtsam_noiseModel_Constrained")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Diagonal, "gtsam_noiseModel_Diagonal")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Gaussian, "gtsam_noiseModel_Gaussian")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Unit, "gtsam_noiseModel_Unit")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Isotropic, "gtsam_noiseModel_Isotropic")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::Robust, "gtsam_noiseModel_Robust")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Base , "gtsam_noiseModel_mEstimator_Base")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Null , "gtsam_noiseModel_mEstimator_Null")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Fair , "gtsam_noiseModel_mEstimator_Fair")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Huber, "gtsam_noiseModel_mEstimator_Huber")
BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey"); BOOST_CLASS_EXPORT_GUID(gtsam::noiseModel::mEstimator::Tukey, "gtsam_noiseModel_mEstimator_Tukey")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedNoiseModel, "gtsam_SharedNoiseModel")
BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal"); BOOST_CLASS_EXPORT_GUID(gtsam::SharedDiagonal, "gtsam_SharedDiagonal")
/* Create GUIDs for geometry */ /* Create GUIDs for geometry */
/* ************************************************************************* */ /* ************************************************************************* */
GTSAM_VALUE_EXPORT(gtsam::Point2); GTSAM_VALUE_EXPORT(gtsam::Point2)
GTSAM_VALUE_EXPORT(gtsam::StereoPoint2); GTSAM_VALUE_EXPORT(gtsam::StereoPoint2)
GTSAM_VALUE_EXPORT(gtsam::Point3); GTSAM_VALUE_EXPORT(gtsam::Point3)
GTSAM_VALUE_EXPORT(gtsam::Rot2); GTSAM_VALUE_EXPORT(gtsam::Rot2)
GTSAM_VALUE_EXPORT(gtsam::Rot3); GTSAM_VALUE_EXPORT(gtsam::Rot3)
GTSAM_VALUE_EXPORT(gtsam::Pose2); GTSAM_VALUE_EXPORT(gtsam::Pose2)
GTSAM_VALUE_EXPORT(gtsam::Pose3); GTSAM_VALUE_EXPORT(gtsam::Pose3)
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2); GTSAM_VALUE_EXPORT(gtsam::Cal3_S2)
GTSAM_VALUE_EXPORT(gtsam::Cal3DS2); GTSAM_VALUE_EXPORT(gtsam::Cal3DS2)
GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo); GTSAM_VALUE_EXPORT(gtsam::Cal3_S2Stereo)
GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera); GTSAM_VALUE_EXPORT(gtsam::CalibratedCamera)
GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2); GTSAM_VALUE_EXPORT(gtsam::PinholeCameraCal3_S2)
GTSAM_VALUE_EXPORT(gtsam::StereoCamera); GTSAM_VALUE_EXPORT(gtsam::StereoCamera)
/* Create GUIDs for factors */
/* ************************************************************************* */
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor");
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "gtsam::JacobianFactor")
-BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor");
+BOOST_CLASS_EXPORT_GUID(gtsam::HessianFactor , "gtsam::HessianFactor")
-BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorPoint2, "gtsam::PriorFactorPoint2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorStereoPoint2, "gtsam::PriorFactorStereoPoint2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3");
+BOOST_CLASS_EXPORT_GUID(PriorFactorPoint3, "gtsam::PriorFactorPoint3")
-BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorRot2, "gtsam::PriorFactorRot2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3");
+BOOST_CLASS_EXPORT_GUID(PriorFactorRot3, "gtsam::PriorFactorRot3")
-BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorPose2, "gtsam::PriorFactorPose2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3");
+BOOST_CLASS_EXPORT_GUID(PriorFactorPose3, "gtsam::PriorFactorPose3")
-BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorCal3_S2, "gtsam::PriorFactorCal3_S2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2");
+BOOST_CLASS_EXPORT_GUID(PriorFactorCal3DS2, "gtsam::PriorFactorCal3DS2")
-BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera");
+BOOST_CLASS_EXPORT_GUID(PriorFactorCalibratedCamera, "gtsam::PriorFactorCalibratedCamera")
-BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera");
+BOOST_CLASS_EXPORT_GUID(PriorFactorStereoCamera, "gtsam::PriorFactorStereoCamera")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint2, "gtsam::BetweenFactorPoint2")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorPoint3, "gtsam::BetweenFactorPoint3")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorRot2, "gtsam::BetweenFactorRot2")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorRot3, "gtsam::BetweenFactorRot3")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorPose2, "gtsam::BetweenFactorPose2")
-BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3");
+BOOST_CLASS_EXPORT_GUID(BetweenFactorPose3, "gtsam::BetweenFactorPose3")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint2, "gtsam::NonlinearEqualityPoint2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoPoint2, "gtsam::NonlinearEqualityStereoPoint2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPoint3, "gtsam::NonlinearEqualityPoint3")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot2, "gtsam::NonlinearEqualityRot2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityRot3, "gtsam::NonlinearEqualityRot3")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose2, "gtsam::NonlinearEqualityPose2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityPose3, "gtsam::NonlinearEqualityPose3")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3_S2, "gtsam::NonlinearEqualityCal3_S2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCal3DS2, "gtsam::NonlinearEqualityCal3DS2")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityCalibratedCamera, "gtsam::NonlinearEqualityCalibratedCamera")
-BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera");
+BOOST_CLASS_EXPORT_GUID(NonlinearEqualityStereoCamera, "gtsam::NonlinearEqualityStereoCamera")
-BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D");
+BOOST_CLASS_EXPORT_GUID(RangeFactor2D, "gtsam::RangeFactor2D")
-BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D");
+BOOST_CLASS_EXPORT_GUID(RangeFactor3D, "gtsam::RangeFactor3D")
-BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2");
+BOOST_CLASS_EXPORT_GUID(RangeFactorPose2, "gtsam::RangeFactorPose2")
-BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3");
+BOOST_CLASS_EXPORT_GUID(RangeFactorPose3, "gtsam::RangeFactorPose3")
-BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint");
+BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCameraPoint, "gtsam::RangeFactorCalibratedCameraPoint")
-BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point");
+BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2Point, "gtsam::RangeFactorPinholeCameraCal3_S2Point")
-BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera");
+BOOST_CLASS_EXPORT_GUID(RangeFactorCalibratedCamera, "gtsam::RangeFactorCalibratedCamera")
-BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2");
+BOOST_CLASS_EXPORT_GUID(RangeFactorPinholeCameraCal3_S2, "gtsam::RangeFactorPinholeCameraCal3_S2")
-BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D");
+BOOST_CLASS_EXPORT_GUID(BearingRangeFactor2D, "gtsam::BearingRangeFactor2D")
-BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2");
+BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3_S2, "gtsam::GenericProjectionFactorCal3_S2")
-BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2");
+BOOST_CLASS_EXPORT_GUID(GenericProjectionFactorCal3DS2, "gtsam::GenericProjectionFactorCal3DS2")
-BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2");
+BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3_S2, "gtsam::GeneralSFMFactorCal3_S2")
-BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2");
+BOOST_CLASS_EXPORT_GUID(GeneralSFMFactorCal3DS2, "gtsam::GeneralSFMFactorCal3DS2")
-BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2");
+BOOST_CLASS_EXPORT_GUID(GeneralSFMFactor2Cal3_S2, "gtsam::GeneralSFMFactor2Cal3_S2")
-BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D");
+BOOST_CLASS_EXPORT_GUID(GenericStereoFactor3D, "gtsam::GenericStereoFactor3D")
/* ************************************************************************* */
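The unqualified factor names above (PriorFactorPose2, BetweenFactorPose2, and so on) appear to be test-local typedefs of the corresponding factor templates, e.g. gtsam::PriorFactor<gtsam::Pose2>; the GUIDs let a NonlinearFactorGraph full of such factors be serialized through its polymorphic NonlinearFactor shared pointers. The following is a hedged sketch of that round trip, not taken from the patch; header locations have shifted between GTSAM versions, so the include paths are only indicative.

#include <sstream>
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>
#include <gtsam/geometry/Pose2.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/slam/PriorFactor.h>

// Build a tiny pose graph and serialize it; each factor is stored as a
// shared_ptr<NonlinearFactor>, so the exports above are what tell Boost
// which concrete factor type to reconstruct on load.
gtsam::NonlinearFactorGraph roundTripGraph() {
  auto noise = gtsam::noiseModel::Diagonal::Sigmas(gtsam::Vector3(0.1, 0.1, 0.05));

  gtsam::NonlinearFactorGraph graph;
  graph.emplace_shared<gtsam::PriorFactor<gtsam::Pose2> >(0, gtsam::Pose2(0, 0, 0), noise);
  graph.emplace_shared<gtsam::BetweenFactor<gtsam::Pose2> >(0, 1, gtsam::Pose2(1, 0, 0), noise);

  std::stringstream ss;
  {
    boost::archive::text_oarchive oa(ss);
    const gtsam::NonlinearFactorGraph& constGraph = graph;  // save via const reference
    oa << constGraph;
  }

  gtsam::NonlinearFactorGraph restored;
  boost::archive::text_iarchive ia(ss);
  ia >> restored;
  return restored;
}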

View File

@@ -200,7 +200,7 @@ TEST(SubgraphPreconditioner, system) {
}
/* ************************************************************************* */
-BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor");
+BOOST_CLASS_EXPORT_GUID(gtsam::JacobianFactor, "JacobianFactor")
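Here the JacobianFactor GUID is registered so that the read() helper defined just below can deserialize a GaussianFactorGraph from an XML archive whose polymorphic entries were written with that same GUID. As a point of reference only, a generic sketch of that XML-loading pattern is shown here; the function name, file handling, and NVP tag are placeholders and not necessarily what the test uses.

#include <fstream>
#include <string>
#include <boost/archive/xml_iarchive.hpp>
#include <boost/serialization/nvp.hpp>
#include <gtsam/linear/GaussianFactorGraph.h>

// Load a serialized GaussianFactorGraph; the XML tag ("graph" here) must
// match the name used when the archive was written.
static gtsam::GaussianFactorGraph readGraphXML(const std::string& path) {
  std::ifstream is(path.c_str());
  boost::archive::xml_iarchive ia(is);
  gtsam::GaussianFactorGraph graph;
  ia >> boost::serialization::make_nvp("graph", graph);
  return graph;
}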
// Read from XML file
static GaussianFactorGraph read(const string& name) {