Merge branch 'feature/smartFactors'
commit e4f2d39a35
@ -0,0 +1,79 @@
3 7 18

0 0 -3.859900e+02 3.871200e+02
1 0 -3.844000e+01 4.921200e+02
2 0 -6.679200e+02 1.231100e+02
0 1 3.838800e+02 -1.529999e+01
1 1 5.597500e+02 -1.061500e+02
0 2 5.915500e+02 1.364400e+02
1 2 8.638600e+02 -2.346997e+01
2 2 4.947200e+02 1.125200e+02
0 3 5.925000e+02 1.257500e+02
1 3 8.610800e+02 -3.521997e+01
2 3 4.985400e+02 1.015600e+02
0 4 3.487200e+02 5.583800e+02
1 4 7.760300e+02 4.835300e+02
2 4 7.780029e+00 3.263500e+02
0 5 1.401001e+01 9.642001e+01
1 5 2.071300e+02 1.183600e+02
1 6 5.431801e+02 2.948100e+02
2 6 -5.841998e+01 1.108300e+02

-1.6943983532198115e-02
1.1171804676513932e-02
2.4643508831711991e-03
7.3030995682610689e-01
-2.6490818471043420e-01
-1.7127892627337182e+00
1.4300319432711681e+03
-7.5572758535864072e-08
3.2377569465570913e-14

1.5049725341485708e-02
-1.8504564785154357e-01
-2.9278402790141456e-01
-1.0590476152349551e+00
-3.6017862414345798e-02
-1.5720340175803784e+00
1.4321374541298685e+03
-7.3171919892612292e-08
3.1759419019880947e-14

-3.0793597986873011e-01
3.2077907982952031e-01
2.2253985096991455e-01
8.5034483295909009e+00
6.7499603629668741e+00
-3.6383814384447088e+00
1.5720470590375264e+03
-1.5962623661947355e-08
-1.6507904848058800e-14

-1.2055995050700867e+01
1.2838775976205760e+01
-4.1099369264082803e+01

6.4168905904672933e+00
3.8897031177598462e-01
-2.3586282709150449e+01

1.3051100355717297e+01
3.8387587111611952e+00
-2.9777932175344951e+01

1.3060946673472820e+01
3.5910521225905803e+00
-2.9759080795372942e+01

1.4265764475421857e+01
2.4096216156436530e+01
-5.4823971067225500e+01

-2.5292283211391348e-01
2.2166082122808284e+00
-2.1712127480255084e+01

7.6465738085189585e+00
1.4185331909846619e+01
-5.2070299568846060e+01
@ -99,6 +99,9 @@ namespace gtsam {
|
|||
/// Return the diagonal of the Hessian for this factor
|
||||
virtual VectorValues hessianDiagonal() const = 0;
|
||||
|
||||
/// Return the diagonal of the Hessian for this factor (raw memory version)
|
||||
virtual void hessianDiagonal(double* d) const = 0;
|
||||
|
||||
/// Return the block diagonal of the Hessian for this factor
|
||||
virtual std::map<Key,Matrix> hessianBlockDiagonal() const = 0;
|
||||
|
||||
|
|
|
|||
|
|
@ -358,6 +358,24 @@ VectorValues HessianFactor::hessianDiagonal() const {
|
|||
return d;
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// TODO: currently assumes that all variables have dimension 9 and that keys are arranged from 0 to n
|
||||
void HessianFactor::hessianDiagonal(double* d) const {
|
||||
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, 9, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
|
||||
// Loop over all variables in the factor
|
||||
for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
// Get the diagonal block, and insert its diagonal
|
||||
Matrix B = info_(pos, pos).selfadjointView();
|
||||
DVector dj = B.diagonal();
|
||||
DMap(d + 9 * j) += dj;
|
||||
}
|
||||
}
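A minimal usage sketch for the two overloads (illustration only, not part of the commit; it assumes, per the TODO above, 9-dimensional variables with keys 0..n-1, and that the caller owns and zero-initializes the raw buffer):

#include <vector>
#include <gtsam/linear/HessianFactor.h>

void exampleAccumulateDiagonal(const gtsam::HessianFactor& factor,
                               size_t nrVariables) {
  // Keyed version: one diagonal segment per variable key.
  gtsam::VectorValues perKey = factor.hessianDiagonal();

  // Raw version: the caller allocates and zeros 9*nrVariables doubles;
  // the factor then adds (+=) its contribution in place.
  std::vector<double> diag(9 * nrVariables, 0.0);
  factor.hessianDiagonal(&diag[0]);
}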
|
||||
|
||||
/* ************************************************************************* */
|
||||
map<Key,Matrix> HessianFactor::hessianBlockDiagonal() const {
|
||||
map<Key,Matrix> blocks;
|
||||
|
|
|
|||
|
|
@ -340,6 +340,9 @@ namespace gtsam {
|
|||
/// Return the diagonal of the Hessian for this factor
|
||||
virtual VectorValues hessianDiagonal() const;
|
||||
|
||||
/* ************************************************************************* */
|
||||
virtual void hessianDiagonal(double* d) const;
|
||||
|
||||
/// Return the block diagonal of the Hessian for this factor
|
||||
virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
|
||||
|
||||
|
|
|
|||
|
|
@ -57,63 +57,56 @@ using namespace boost::assign;
|
|||
|
||||
namespace gtsam {
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor() :
|
||||
Ab_(cref_list_of<1>(1), 0)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor() :
|
||||
Ab_(cref_list_of<1>(1), 0) {
|
||||
getb().setZero();
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const GaussianFactor& gf) {
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const GaussianFactor& gf) {
|
||||
// Copy the matrix data depending on what type of factor we're copying from
|
||||
if(const JacobianFactor* rhs = dynamic_cast<const JacobianFactor*>(&gf))
|
||||
if (const JacobianFactor* rhs = dynamic_cast<const JacobianFactor*>(&gf))
|
||||
*this = JacobianFactor(*rhs);
|
||||
else if(const HessianFactor* rhs = dynamic_cast<const HessianFactor*>(&gf))
|
||||
else if (const HessianFactor* rhs = dynamic_cast<const HessianFactor*>(&gf))
|
||||
*this = JacobianFactor(*rhs);
|
||||
else
|
||||
throw std::invalid_argument("In JacobianFactor(const GaussianFactor& rhs), rhs is neither a JacobianFactor nor a HessianFactor");
|
||||
}
|
||||
throw std::invalid_argument(
|
||||
"In JacobianFactor(const GaussianFactor& rhs), rhs is neither a JacobianFactor nor a HessianFactor");
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const Vector& b_in) :
|
||||
Ab_(cref_list_of<1>(1), b_in.size())
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const Vector& b_in) :
|
||||
Ab_(cref_list_of<1>(1), b_in.size()) {
|
||||
getb() = b_in;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(Key i1, const Matrix& A1,
|
||||
const Vector& b, const SharedDiagonal& model)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(Key i1, const Matrix& A1, const Vector& b,
|
||||
const SharedDiagonal& model) {
|
||||
fillTerms(cref_list_of<1>(make_pair(i1, A1)), b, model);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(
|
||||
const Key i1, const Matrix& A1, Key i2, const Matrix& A2,
|
||||
const Vector& b, const SharedDiagonal& model)
|
||||
{
|
||||
fillTerms(cref_list_of<2>
|
||||
(make_pair(i1,A1))
|
||||
(make_pair(i2,A2)), b, model);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
|
||||
const Matrix& A2, const Vector& b, const SharedDiagonal& model) {
|
||||
fillTerms(cref_list_of<2>(make_pair(i1, A1))(make_pair(i2, A2)), b, model);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(
|
||||
const Key i1, const Matrix& A1, Key i2, const Matrix& A2,
|
||||
Key i3, const Matrix& A3, const Vector& b, const SharedDiagonal& model)
|
||||
{
|
||||
fillTerms(cref_list_of<3>
|
||||
(make_pair(i1,A1))
|
||||
(make_pair(i2,A2))
|
||||
(make_pair(i3,A3)), b, model);
|
||||
}
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const Key i1, const Matrix& A1, Key i2,
|
||||
const Matrix& A2, Key i3, const Matrix& A3, const Vector& b,
|
||||
const SharedDiagonal& model) {
|
||||
fillTerms(
|
||||
cref_list_of<3>(make_pair(i1, A1))(make_pair(i2, A2))(make_pair(i3, A3)),
|
||||
b, model);
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const HessianFactor& factor) :
|
||||
Base(factor), Ab_(VerticalBlockMatrix::LikeActiveViewOf(factor.matrixObject(), factor.rows()))
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const HessianFactor& factor) :
|
||||
Base(factor), Ab_(
|
||||
VerticalBlockMatrix::LikeActiveViewOf(factor.matrixObject(),
|
||||
factor.rows())) {
|
||||
// Copy Hessian into our matrix and then do in-place Cholesky
|
||||
Ab_.full() = factor.matrixObject().full();
|
||||
|
||||
|
|
@ -123,7 +116,7 @@ namespace gtsam {
|
|||
boost::tie(maxrank, success) = choleskyCareful(Ab_.matrix());
|
||||
|
||||
// Check for indefinite system
|
||||
if(!success)
|
||||
if (!success)
|
||||
throw IndeterminantLinearSystemException(factor.keys().front());
|
||||
|
||||
// Zero out lower triangle
|
||||
|
|
@ -132,37 +125,39 @@ namespace gtsam {
|
|||
// FIXME: replace with triangular system
|
||||
Ab_.rowEnd() = maxrank;
|
||||
model_ = SharedDiagonal(); // should be same as Unit::Create(maxrank);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Helper functions for combine constructor
|
||||
namespace {
|
||||
boost::tuple<FastVector<DenseIndex>, DenseIndex, DenseIndex> _countDims(
|
||||
const FastVector<JacobianFactor::shared_ptr>& factors, const FastVector<VariableSlots::const_iterator>& variableSlots)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
// Helper functions for combine constructor
|
||||
namespace {
|
||||
boost::tuple<FastVector<DenseIndex>, DenseIndex, DenseIndex> _countDims(
|
||||
const FastVector<JacobianFactor::shared_ptr>& factors,
|
||||
const FastVector<VariableSlots::const_iterator>& variableSlots) {
|
||||
gttic(countDims);
|
||||
#ifdef GTSAM_EXTRA_CONSISTENCY_CHECKS
|
||||
FastVector<DenseIndex> varDims(variableSlots.size(), numeric_limits<DenseIndex>::max());
|
||||
#else
|
||||
FastVector<DenseIndex> varDims(variableSlots.size(), numeric_limits<DenseIndex>::max());
|
||||
FastVector<DenseIndex> varDims(variableSlots.size(),
|
||||
numeric_limits<DenseIndex>::max());
|
||||
#endif
|
||||
DenseIndex m = 0;
|
||||
DenseIndex n = 0;
|
||||
for(size_t jointVarpos = 0; jointVarpos < variableSlots.size(); ++jointVarpos)
|
||||
{
|
||||
for (size_t jointVarpos = 0; jointVarpos < variableSlots.size();
|
||||
++jointVarpos) {
|
||||
const VariableSlots::const_iterator& slots = variableSlots[jointVarpos];
|
||||
|
||||
assert(slots->second.size() == factors.size());
|
||||
|
||||
bool foundVariable = false;
|
||||
for(size_t sourceFactorI = 0; sourceFactorI < slots->second.size(); ++sourceFactorI)
|
||||
{
|
||||
for (size_t sourceFactorI = 0; sourceFactorI < slots->second.size();
|
||||
++sourceFactorI) {
|
||||
const size_t sourceVarpos = slots->second[sourceFactorI];
|
||||
if(sourceVarpos != VariableSlots::Empty) {
|
||||
if (sourceVarpos != VariableSlots::Empty) {
|
||||
const JacobianFactor& sourceFactor = *factors[sourceFactorI];
|
||||
if(sourceFactor.cols() > 1) {
|
||||
if (sourceFactor.cols() > 1) {
|
||||
foundVariable = true;
|
||||
DenseIndex vardim = sourceFactor.getDim(sourceFactor.begin() + sourceVarpos);
|
||||
DenseIndex vardim = sourceFactor.getDim(
|
||||
sourceFactor.begin() + sourceVarpos);
|
||||
|
||||
#ifdef GTSAM_EXTRA_CONSISTENCY_CHECKS
|
||||
if(varDims[jointVarpos] == numeric_limits<DenseIndex>::max()) {
|
||||
|
|
@ -185,8 +180,9 @@ namespace gtsam {
|
|||
}
|
||||
}
|
||||
|
||||
if(!foundVariable)
|
||||
throw std::invalid_argument("Unable to determine dimensionality for all variables");
|
||||
if (!foundVariable)
|
||||
throw std::invalid_argument(
|
||||
"Unable to determine dimensionality for all variables");
|
||||
}
|
||||
|
||||
BOOST_FOREACH(const JacobianFactor::shared_ptr& factor, factors) {
|
||||
|
|
@ -200,44 +196,43 @@ namespace gtsam {
|
|||
#endif
|
||||
|
||||
return boost::make_tuple(varDims, m, n);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
FastVector<JacobianFactor::shared_ptr>
|
||||
_convertOrCastToJacobians(const GaussianFactorGraph& factors)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
FastVector<JacobianFactor::shared_ptr> _convertOrCastToJacobians(
|
||||
const GaussianFactorGraph& factors) {
|
||||
gttic(Convert_to_Jacobians);
|
||||
FastVector<JacobianFactor::shared_ptr> jacobians;
|
||||
jacobians.reserve(factors.size());
|
||||
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) {
|
||||
if(factor) {
|
||||
if(JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(factor))
|
||||
if (factor) {
|
||||
if (JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<
|
||||
JacobianFactor>(factor))
|
||||
jacobians.push_back(jf);
|
||||
else
|
||||
jacobians.push_back(boost::make_shared<JacobianFactor>(*factor));
|
||||
}
|
||||
}
|
||||
return jacobians;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(
|
||||
const GaussianFactorGraph& graph,
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor::JacobianFactor(const GaussianFactorGraph& graph,
|
||||
boost::optional<const Ordering&> ordering,
|
||||
boost::optional<const VariableSlots&> variableSlots)
|
||||
{
|
||||
boost::optional<const VariableSlots&> variableSlots) {
|
||||
gttic(JacobianFactor_combine_constructor);
|
||||
|
||||
// Compute VariableSlots if one was not provided
|
||||
boost::optional<VariableSlots> computedVariableSlots;
|
||||
if(!variableSlots) {
|
||||
if (!variableSlots) {
|
||||
computedVariableSlots = VariableSlots(graph);
|
||||
variableSlots = computedVariableSlots; // Binds reference, does not copy VariableSlots
|
||||
}
|
||||
|
||||
// Cast or convert to Jacobians
|
||||
FastVector<JacobianFactor::shared_ptr> jacobians = _convertOrCastToJacobians(graph);
|
||||
FastVector<JacobianFactor::shared_ptr> jacobians = _convertOrCastToJacobians(
|
||||
graph);
|
||||
|
||||
gttic(Order_slots);
|
||||
// Order variable slots - we maintain the vector of ordered slots, as well as keep a list
|
||||
|
|
@ -245,22 +240,24 @@ namespace gtsam {
|
|||
// be added after all of the ordered variables.
|
||||
FastVector<VariableSlots::const_iterator> orderedSlots;
|
||||
orderedSlots.reserve(variableSlots->size());
|
||||
if(ordering) {
|
||||
if (ordering) {
|
||||
// If an ordering is provided, arrange the slots first that ordering
|
||||
FastList<VariableSlots::const_iterator> unorderedSlots;
|
||||
size_t nOrderingSlotsUsed = 0;
|
||||
orderedSlots.resize(ordering->size());
|
||||
FastMap<Key, size_t> inverseOrdering = ordering->invert();
|
||||
for(VariableSlots::const_iterator item = variableSlots->begin(); item != variableSlots->end(); ++item) {
|
||||
FastMap<Key, size_t>::const_iterator orderingPosition = inverseOrdering.find(item->first);
|
||||
if(orderingPosition == inverseOrdering.end()) {
|
||||
for (VariableSlots::const_iterator item = variableSlots->begin();
|
||||
item != variableSlots->end(); ++item) {
|
||||
FastMap<Key, size_t>::const_iterator orderingPosition =
|
||||
inverseOrdering.find(item->first);
|
||||
if (orderingPosition == inverseOrdering.end()) {
|
||||
unorderedSlots.push_back(item);
|
||||
} else {
|
||||
orderedSlots[orderingPosition->second] = item;
|
||||
++ nOrderingSlotsUsed;
|
||||
++nOrderingSlotsUsed;
|
||||
}
|
||||
}
|
||||
if(nOrderingSlotsUsed != ordering->size())
|
||||
if (nOrderingSlotsUsed != ordering->size())
|
||||
throw std::invalid_argument(
|
||||
"The ordering provided to the JacobianFactor combine constructor\n"
|
||||
"contained extra variables that did not appear in the factors to combine.");
|
||||
|
|
@ -271,7 +268,8 @@ namespace gtsam {
|
|||
} else {
|
||||
// If no ordering is provided, arrange the slots as they were, which will be sorted
|
||||
// numerically since VariableSlots uses a map sorting on Key.
|
||||
for(VariableSlots::const_iterator item = variableSlots->begin(); item != variableSlots->end(); ++item)
|
||||
for (VariableSlots::const_iterator item = variableSlots->begin();
|
||||
item != variableSlots->end(); ++item)
|
||||
orderedSlots.push_back(item);
|
||||
}
|
||||
gttoc(Order_slots);
|
||||
|
|
@ -285,26 +283,30 @@ namespace gtsam {
|
|||
gttic(allocate);
|
||||
Ab_ = VerticalBlockMatrix(varDims, m, true); // Allocate augmented matrix
|
||||
Base::keys_.resize(orderedSlots.size());
|
||||
boost::range::copy( // Get variable keys
|
||||
orderedSlots | boost::adaptors::indirected | boost::adaptors::map_keys, Base::keys_.begin());
|
||||
boost::range::copy(
|
||||
// Get variable keys
|
||||
orderedSlots | boost::adaptors::indirected | boost::adaptors::map_keys,
|
||||
Base::keys_.begin());
|
||||
gttoc(allocate);
|
||||
|
||||
// Loop over slots in combined factor and copy blocks from source factors
|
||||
gttic(copy_blocks);
|
||||
size_t combinedSlot = 0;
|
||||
BOOST_FOREACH(VariableSlots::const_iterator varslot, orderedSlots) {
|
||||
JacobianFactor::ABlock destSlot(this->getA(this->begin()+combinedSlot));
|
||||
JacobianFactor::ABlock destSlot(this->getA(this->begin() + combinedSlot));
|
||||
// Loop over source jacobians
|
||||
DenseIndex nextRow = 0;
|
||||
for(size_t factorI = 0; factorI < jacobians.size(); ++factorI) {
|
||||
for (size_t factorI = 0; factorI < jacobians.size(); ++factorI) {
|
||||
// Slot in source factor
|
||||
const size_t sourceSlot = varslot->second[factorI];
|
||||
const DenseIndex sourceRows = jacobians[factorI]->rows();
|
||||
if(sourceRows > 0) {
|
||||
JacobianFactor::ABlock::RowsBlockXpr destBlock(destSlot.middleRows(nextRow, sourceRows));
|
||||
if (sourceRows > 0) {
|
||||
JacobianFactor::ABlock::RowsBlockXpr destBlock(
|
||||
destSlot.middleRows(nextRow, sourceRows));
|
||||
// Copy if exists in source factor, otherwise set zero
|
||||
if(sourceSlot != VariableSlots::Empty)
|
||||
destBlock = jacobians[factorI]->getA(jacobians[factorI]->begin()+sourceSlot);
|
||||
if (sourceSlot != VariableSlots::Empty)
|
||||
destBlock = jacobians[factorI]->getA(
|
||||
jacobians[factorI]->begin() + sourceSlot);
|
||||
else
|
||||
destBlock.setZero();
|
||||
nextRow += sourceRows;
|
||||
|
|
@ -320,15 +322,16 @@ namespace gtsam {
|
|||
boost::optional<Vector> sigmas;
|
||||
// Loop over source jacobians
|
||||
DenseIndex nextRow = 0;
|
||||
for(size_t factorI = 0; factorI < jacobians.size(); ++factorI) {
|
||||
for (size_t factorI = 0; factorI < jacobians.size(); ++factorI) {
|
||||
const DenseIndex sourceRows = jacobians[factorI]->rows();
|
||||
if(sourceRows > 0) {
|
||||
if (sourceRows > 0) {
|
||||
this->getb().segment(nextRow, sourceRows) = jacobians[factorI]->getb();
|
||||
if(jacobians[factorI]->get_model()) {
|
||||
if (jacobians[factorI]->get_model()) {
|
||||
// If the factor has a noise model and we haven't yet allocated sigmas, allocate it.
|
||||
if(!sigmas)
|
||||
if (!sigmas)
|
||||
sigmas = Vector::Constant(m, 1.0);
|
||||
sigmas->segment(nextRow, sourceRows) = jacobians[factorI]->get_model()->sigmas();
|
||||
sigmas->segment(nextRow, sourceRows) =
|
||||
jacobians[factorI]->get_model()->sigmas();
|
||||
if (jacobians[factorI]->isConstrained())
|
||||
anyConstrained = true;
|
||||
}
|
||||
|
|
@ -337,44 +340,44 @@ namespace gtsam {
|
|||
}
|
||||
gttoc(copy_vectors);
|
||||
|
||||
if(sigmas)
|
||||
if (sigmas)
|
||||
this->setModel(anyConstrained, *sigmas);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::print(const string& s, const KeyFormatter& formatter) const
|
||||
{
|
||||
if(!s.empty())
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::print(const string& s,
|
||||
const KeyFormatter& formatter) const {
|
||||
if (!s.empty())
|
||||
cout << s << "\n";
|
||||
for(const_iterator key = begin(); key != end(); ++key) {
|
||||
cout <<
|
||||
formatMatrixIndented((boost::format(" A[%1%] = ") % formatter(*key)).str(), getA(key))
|
||||
for (const_iterator key = begin(); key != end(); ++key) {
|
||||
cout
|
||||
<< formatMatrixIndented(
|
||||
(boost::format(" A[%1%] = ") % formatter(*key)).str(), getA(key))
|
||||
<< endl;
|
||||
}
|
||||
cout << formatMatrixIndented(" b = ", getb(), true) << "\n";
|
||||
if(model_)
|
||||
if (model_)
|
||||
model_->print(" Noise model: ");
|
||||
else
|
||||
cout << " No noise model" << endl;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
// Check if two linear factors are equal
|
||||
bool JacobianFactor::equals(const GaussianFactor& f_, double tol) const
|
||||
{
|
||||
if(!dynamic_cast<const JacobianFactor*>(&f_))
|
||||
/* ************************************************************************* */
|
||||
// Check if two linear factors are equal
|
||||
bool JacobianFactor::equals(const GaussianFactor& f_, double tol) const {
|
||||
if (!dynamic_cast<const JacobianFactor*>(&f_))
|
||||
return false;
|
||||
else {
|
||||
const JacobianFactor& f(static_cast<const JacobianFactor&>(f_));
|
||||
|
||||
// Check keys
|
||||
if(keys() != f.keys())
|
||||
if (keys() != f.keys())
|
||||
return false;
|
||||
|
||||
// Check noise model
|
||||
if((model_ && !f.model_) || (!model_ && f.model_))
|
||||
if ((model_ && !f.model_) || (!model_ && f.model_))
|
||||
return false;
|
||||
if(model_ && f.model_ && !model_->equals(*f.model_, tol))
|
||||
if (model_ && f.model_ && !model_->equals(*f.model_, tol))
|
||||
return false;
|
||||
|
||||
// Check matrix sizes
|
||||
|
|
@ -384,156 +387,183 @@ namespace gtsam {
|
|||
// Check matrix contents
|
||||
constABlock Ab1(Ab_.range(0, Ab_.nBlocks()));
|
||||
constABlock Ab2(f.Ab_.range(0, f.Ab_.nBlocks()));
|
||||
for(size_t row=0; row< (size_t) Ab1.rows(); ++row)
|
||||
if(!equal_with_abs_tol(Ab1.row(row), Ab2.row(row), tol) &&
|
||||
!equal_with_abs_tol(-Ab1.row(row), Ab2.row(row), tol))
|
||||
for (size_t row = 0; row < (size_t) Ab1.rows(); ++row)
|
||||
if (!equal_with_abs_tol(Ab1.row(row), Ab2.row(row), tol)
|
||||
&& !equal_with_abs_tol(-Ab1.row(row), Ab2.row(row), tol))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
|
||||
Vector e = -getb();
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
for (size_t pos = 0; pos < size(); ++pos)
|
||||
e += Ab_(pos) * c[keys_[pos]];
|
||||
return e;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::error_vector(const VectorValues& c) const {
|
||||
if(model_)
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::error_vector(const VectorValues& c) const {
|
||||
if (model_)
|
||||
return model_->whiten(unweighted_error(c));
|
||||
else
|
||||
return unweighted_error(c);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
double JacobianFactor::error(const VectorValues& c) const {
|
||||
if (empty()) return 0;
|
||||
/* ************************************************************************* */
|
||||
double JacobianFactor::error(const VectorValues& c) const {
|
||||
if (empty())
|
||||
return 0;
|
||||
Vector weighted = error_vector(c);
|
||||
return 0.5 * weighted.dot(weighted);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedInformation() const {
|
||||
if(model_) {
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedInformation() const {
|
||||
if (model_) {
|
||||
Matrix AbWhitened = Ab_.full();
|
||||
model_->WhitenInPlace(AbWhitened);
|
||||
return AbWhitened.transpose() * AbWhitened;
|
||||
} else {
|
||||
return Ab_.full().transpose() * Ab_.full();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::information() const {
|
||||
if(model_) {
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::information() const {
|
||||
if (model_) {
|
||||
Matrix AWhitened = this->getA();
|
||||
model_->WhitenInPlace(AWhitened);
|
||||
return AWhitened.transpose() * AWhitened;
|
||||
} else {
|
||||
return this->getA().transpose() * this->getA();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues JacobianFactor::hessianDiagonal() const {
|
||||
/* ************************************************************************* */
|
||||
VectorValues JacobianFactor::hessianDiagonal() const {
|
||||
VectorValues d;
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
{
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
size_t nj = Ab_(pos).cols();
|
||||
Vector dj(nj);
|
||||
for (size_t k = 0; k < nj; ++k) {
|
||||
Vector column_k = Ab_(pos).col(k);
|
||||
if (model_) column_k = model_->whiten(column_k);
|
||||
dj(k) = dot(column_k,column_k);
|
||||
if (model_)
|
||||
column_k = model_->whiten(column_k);
|
||||
dj(k) = dot(column_k, column_k);
|
||||
}
|
||||
d.insert(j,dj);
|
||||
d.insert(j, dj);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
map<Key,Matrix> JacobianFactor::hessianBlockDiagonal() const {
|
||||
map<Key,Matrix> blocks;
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
// TODO: currently assumes that all variables have dimension 9 and that keys are arranged from 0 to n
|
||||
void JacobianFactor::hessianDiagonal(double* d) const {
|
||||
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, 9, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
|
||||
// Loop over all variables in the factor
|
||||
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
|
||||
// Get the diagonal block, and insert its diagonal
|
||||
DVector dj;
|
||||
for (size_t k = 0; k < 9; ++k)
|
||||
dj(k) = Ab_(j).col(k).squaredNorm();
|
||||
|
||||
DMap(d + 9 * j) += dj;
|
||||
}
|
||||
}
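The inner loop relies on the identity that the k-th diagonal entry of A'A is the squared norm of the k-th column of A, so no matrix product is formed. A small self-contained check of that identity (illustration only):

#include <Eigen/Dense>
#include <cassert>

void checkDiagonalOfAtA(const Eigen::MatrixXd& A) {
  Eigen::VectorXd direct = (A.transpose() * A).diagonal();
  Eigen::VectorXd viaColumns = A.colwise().squaredNorm().transpose();
  assert(direct.isApprox(viaColumns));
}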
|
||||
|
||||
/* ************************************************************************* */
|
||||
map<Key, Matrix> JacobianFactor::hessianBlockDiagonal() const {
|
||||
map<Key, Matrix> blocks;
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
Matrix Aj = Ab_(pos);
|
||||
if (model_) Aj = model_->Whiten(Aj);
|
||||
blocks.insert(make_pair(j,Aj.transpose()*Aj));
|
||||
if (model_)
|
||||
Aj = model_->Whiten(Aj);
|
||||
blocks.insert(make_pair(j, Aj.transpose() * Aj));
|
||||
}
|
||||
return blocks;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::operator*(const VectorValues& x) const {
|
||||
/* ************************************************************************* */
|
||||
Vector JacobianFactor::operator*(const VectorValues& x) const {
|
||||
Vector Ax = zero(Ab_.rows());
|
||||
if (empty()) return Ax;
|
||||
if (empty())
|
||||
return Ax;
|
||||
|
||||
// Just iterate over all A matrices and multiply in correct config part
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
for (size_t pos = 0; pos < size(); ++pos)
|
||||
Ax += Ab_(pos) * x[keys_[pos]];
|
||||
|
||||
return model_ ? model_->whiten(Ax) : Ax;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
|
||||
VectorValues& x) const
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
|
||||
VectorValues& x) const {
|
||||
Vector E = alpha * (model_ ? model_->whiten(e) : e);
|
||||
// Just iterate over all A matrices and insert Ai^e into VectorValues
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
{
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
// Create the value as a zero vector if it does not exist.
|
||||
pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector());
|
||||
if(xi.second)
|
||||
if (xi.second)
|
||||
xi.first->second = Vector::Zero(getDim(begin() + pos));
|
||||
gtsam::transposeMultiplyAdd(Ab_(pos), E, xi.first->second);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
|
||||
VectorValues& y) const {
|
||||
Vector Ax = (*this)*x;
|
||||
transposeMultiplyAdd(alpha,Ax,y);
|
||||
}
|
||||
Vector Ax = (*this) * x;
|
||||
transposeMultiplyAdd(alpha, Ax, y);
|
||||
}
|
||||
|
||||
void JacobianFactor::multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const {
|
||||
void JacobianFactor::multiplyHessianAdd(double alpha, const double* x,
|
||||
double* y, std::vector<size_t> keys) const {
|
||||
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
typedef Eigen::Map<const DVector> ConstDMap;
|
||||
|
||||
if (empty()) return;
|
||||
if (empty())
|
||||
return;
|
||||
Vector Ax = zero(Ab_.rows());
|
||||
|
||||
// Just iterate over all A matrices and multiply in correct config part
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
Ax += Ab_(pos) * ConstDMap(x + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]);
|
||||
for (size_t pos = 0; pos < size(); ++pos)
|
||||
Ax += Ab_(pos)
|
||||
* ConstDMap(x + keys[keys_[pos]],
|
||||
keys[keys_[pos] + 1] - keys[keys_[pos]]);
|
||||
|
||||
// Deal with noise properly: whiten twice, as we are dividing by the variance
|
||||
if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
|
||||
if (model_) {
|
||||
model_->whitenInPlace(Ax);
|
||||
model_->whitenInPlace(Ax);
|
||||
}
|
||||
|
||||
// multiply with alpha
|
||||
Ax *= alpha;
|
||||
|
||||
// Again iterate over all A matrices and insert Ai^e into y
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
DMap(y + keys[keys_[pos]],keys[keys_[pos]+1]-keys[keys_[pos]]) += Ab_(pos).transpose() * Ax;
|
||||
for (size_t pos = 0; pos < size(); ++pos)
|
||||
DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
|
||||
pos).transpose() * Ax;
|
||||
|
||||
}
|
||||
}
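Note that the std::vector<size_t> keys argument is used here as a cumulative offset table: variable j = keys_[pos] occupies x[keys[j]] .. x[keys[j+1]-1]. A sketch of how such a table could be built from per-variable dimensions (hypothetical helper, not part of this commit):

#include <cstddef>
#include <vector>

std::vector<size_t> buildOffsetTable(const std::vector<size_t>& dims) {
  std::vector<size_t> offsets(dims.size() + 1, 0);
  for (size_t j = 0; j < dims.size(); ++j)
    offsets[j + 1] = offsets[j] + dims[j];  // offsets[j] = start of variable j in x/y
  return offsets;
}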
|
||||
|
||||
/* ************************************************************************* */
|
||||
VectorValues JacobianFactor::gradientAtZero() const {
|
||||
/* ************************************************************************* */
|
||||
VectorValues JacobianFactor::gradientAtZero() const {
|
||||
VectorValues g;
|
||||
Vector b = getb();
|
||||
// Gradient is really -A'*b / sigma^2
|
||||
|
|
@ -541,82 +571,82 @@ namespace gtsam {
|
|||
Vector b_sigma = model_ ? model_->whiten(b) : b;
|
||||
this->transposeMultiplyAdd(-1.0, b_sigma, g); // g -= A'*b/sigma^2
|
||||
return g;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix,Vector> JacobianFactor::jacobian() const {
|
||||
pair<Matrix,Vector> result = jacobianUnweighted();
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix, Vector> JacobianFactor::jacobian() const {
|
||||
pair<Matrix, Vector> result = jacobianUnweighted();
|
||||
// divide in sigma so error is indeed 0.5*|Ax-b|
|
||||
if (model_)
|
||||
model_->WhitenSystem(result.first, result.second);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix,Vector> JacobianFactor::jacobianUnweighted() const {
|
||||
/* ************************************************************************* */
|
||||
pair<Matrix, Vector> JacobianFactor::jacobianUnweighted() const {
|
||||
Matrix A(Ab_.range(0, size()));
|
||||
Vector b(getb());
|
||||
return make_pair(A, b);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobian() const {
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobian() const {
|
||||
Matrix Ab = augmentedJacobianUnweighted();
|
||||
if (model_)
|
||||
model_->WhitenInPlace(Ab);
|
||||
return Ab;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobianUnweighted() const {
|
||||
/* ************************************************************************* */
|
||||
Matrix JacobianFactor::augmentedJacobianUnweighted() const {
|
||||
return Ab_.range(0, Ab_.nBlocks());
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor JacobianFactor::whiten() const {
|
||||
/* ************************************************************************* */
|
||||
JacobianFactor JacobianFactor::whiten() const {
|
||||
JacobianFactor result(*this);
|
||||
if(model_) {
|
||||
if (model_) {
|
||||
result.model_->WhitenInPlace(result.Ab_.full());
|
||||
result.model_ = SharedDiagonal();
|
||||
}
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
GaussianFactor::shared_ptr JacobianFactor::negate() const {
|
||||
/* ************************************************************************* */
|
||||
GaussianFactor::shared_ptr JacobianFactor::negate() const {
|
||||
HessianFactor hessian(*this);
|
||||
return hessian.negate();
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<JacobianFactor> >
|
||||
JacobianFactor::eliminate(const Ordering& keys)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>,
|
||||
boost::shared_ptr<JacobianFactor> > JacobianFactor::eliminate(
|
||||
const Ordering& keys) {
|
||||
GaussianFactorGraph graph;
|
||||
graph.add(*this);
|
||||
return EliminateQR(graph, keys);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::setModel(bool anyConstrained, const Vector& sigmas) {
|
||||
if((size_t) sigmas.size() != this->rows())
|
||||
/* ************************************************************************* */
|
||||
void JacobianFactor::setModel(bool anyConstrained, const Vector& sigmas) {
|
||||
if ((size_t) sigmas.size() != this->rows())
|
||||
throw InvalidNoiseModel(this->rows(), sigmas.size());
|
||||
if (anyConstrained)
|
||||
model_ = noiseModel::Constrained::MixedSigmas(sigmas);
|
||||
else
|
||||
model_ = noiseModel::Diagonal::Sigmas(sigmas);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<JacobianFactor> >
|
||||
EliminateQR(const GaussianFactorGraph& factors, const Ordering& keys)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
std::pair<boost::shared_ptr<GaussianConditional>,
|
||||
boost::shared_ptr<JacobianFactor> > EliminateQR(
|
||||
const GaussianFactorGraph& factors, const Ordering& keys) {
|
||||
gttic(EliminateQR);
|
||||
// Combine and sort variable blocks in elimination order
|
||||
JacobianFactor::shared_ptr jointFactor;
|
||||
try {
|
||||
jointFactor = boost::make_shared<JacobianFactor>(factors, keys);
|
||||
} catch(std::invalid_argument&) {
|
||||
} catch (std::invalid_argument&) {
|
||||
throw InvalidDenseElimination(
|
||||
"EliminateQR was called with a request to eliminate variables that are not\n"
|
||||
"involved in the provided factors.");
|
||||
|
|
@ -624,7 +654,7 @@ namespace gtsam {
|
|||
|
||||
// Do dense elimination
|
||||
SharedDiagonal noiseModel;
|
||||
if(jointFactor->model_)
|
||||
if (jointFactor->model_)
|
||||
jointFactor->model_ = jointFactor->model_->QR(jointFactor->Ab_.matrix());
|
||||
else
|
||||
inplace_QR(jointFactor->Ab_.matrix());
|
||||
|
|
@ -633,18 +663,20 @@ namespace gtsam {
|
|||
jointFactor->Ab_.matrix().triangularView<Eigen::StrictlyLower>().setZero();
|
||||
|
||||
// Split elimination result into conditional and remaining factor
|
||||
GaussianConditional::shared_ptr conditional = jointFactor->splitConditional(keys.size());
|
||||
GaussianConditional::shared_ptr conditional = jointFactor->splitConditional(
|
||||
keys.size());
|
||||
|
||||
return make_pair(conditional, jointFactor);
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
GaussianConditional::shared_ptr JacobianFactor::splitConditional(size_t nrFrontals)
|
||||
{
|
||||
/* ************************************************************************* */
|
||||
GaussianConditional::shared_ptr JacobianFactor::splitConditional(
|
||||
size_t nrFrontals) {
|
||||
gttic(JacobianFactor_splitConditional);
|
||||
|
||||
if(nrFrontals > size())
|
||||
throw std::invalid_argument("Requesting to split more variables than exist using JacobianFactor::splitConditional");
|
||||
if (nrFrontals > size())
|
||||
throw std::invalid_argument(
|
||||
"Requesting to split more variables than exist using JacobianFactor::splitConditional");
|
||||
|
||||
DenseIndex frontalDim = Ab_.range(0, nrFrontals).cols();
|
||||
|
||||
|
|
@ -653,18 +685,20 @@ namespace gtsam {
|
|||
const DenseIndex originalRowEnd = Ab_.rowEnd();
|
||||
Ab_.rowEnd() = Ab_.rowStart() + frontalDim;
|
||||
SharedDiagonal conditionalNoiseModel;
|
||||
if(model_) {
|
||||
if((DenseIndex)model_->dim() < frontalDim)
|
||||
if (model_) {
|
||||
if ((DenseIndex) model_->dim() < frontalDim)
|
||||
throw IndeterminantLinearSystemException(this->keys().front());
|
||||
conditionalNoiseModel =
|
||||
noiseModel::Diagonal::Sigmas(model_->sigmas().segment(Ab_.rowStart(), Ab_.rows()));
|
||||
conditionalNoiseModel = noiseModel::Diagonal::Sigmas(
|
||||
model_->sigmas().segment(Ab_.rowStart(), Ab_.rows()));
|
||||
}
|
||||
GaussianConditional::shared_ptr conditional = boost::make_shared<GaussianConditional>(
|
||||
Base::keys_, nrFrontals, Ab_, conditionalNoiseModel);
|
||||
const DenseIndex maxRemainingRows = std::min(Ab_.cols() - 1, originalRowEnd) - Ab_.rowStart() - frontalDim;
|
||||
GaussianConditional::shared_ptr conditional = boost::make_shared<
|
||||
GaussianConditional>(Base::keys_, nrFrontals, Ab_, conditionalNoiseModel);
|
||||
const DenseIndex maxRemainingRows = std::min(Ab_.cols() - 1, originalRowEnd)
|
||||
- Ab_.rowStart() - frontalDim;
|
||||
const DenseIndex remainingRows =
|
||||
model_ ? std::min(model_->sigmas().size() - frontalDim, maxRemainingRows)
|
||||
: maxRemainingRows;
|
||||
model_ ?
|
||||
std::min(model_->sigmas().size() - frontalDim, maxRemainingRows) :
|
||||
maxRemainingRows;
|
||||
Ab_.rowStart() += frontalDim;
|
||||
Ab_.rowEnd() = Ab_.rowStart() + remainingRows;
|
||||
Ab_.firstBlock() += nrFrontals;
|
||||
|
|
@ -674,16 +708,18 @@ namespace gtsam {
|
|||
gttic(remaining_factor);
|
||||
keys_.erase(begin(), begin() + nrFrontals);
|
||||
// Set sigmas with the right model
|
||||
if(model_) {
|
||||
if (model_) {
|
||||
if (model_->isConstrained())
|
||||
model_ = noiseModel::Constrained::MixedSigmas(model_->sigmas().tail(remainingRows));
|
||||
model_ = noiseModel::Constrained::MixedSigmas(
|
||||
model_->sigmas().tail(remainingRows));
|
||||
else
|
||||
model_ = noiseModel::Diagonal::Sigmas(model_->sigmas().tail(remainingRows));
|
||||
model_ = noiseModel::Diagonal::Sigmas(
|
||||
model_->sigmas().tail(remainingRows));
|
||||
assert(model_->dim() == (size_t)Ab_.rows());
|
||||
}
|
||||
gttoc(remaining_factor);
|
||||
|
||||
return conditional;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -185,6 +185,9 @@ namespace gtsam {
|
|||
/// Return the diagonal of the Hessian for this factor
|
||||
virtual VectorValues hessianDiagonal() const;
|
||||
|
||||
/* ************************************************************************* */
|
||||
virtual void hessianDiagonal(double* d) const;
|
||||
|
||||
/// Return the block diagonal of the Hessian for this factor
|
||||
virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
|
||||
|
||||
|
|
|
|||
|
|
@ -471,7 +471,7 @@ TEST(HessianFactor, hessianDiagonal)
|
|||
LONGS_EQUAL(2,actualBD.size());
|
||||
EXPECT(assert_equal(G11,actualBD[0]));
|
||||
EXPECT(assert_equal(G22,actualBD[1]));
|
||||
}
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
|
||||
|
|
|
|||
|
|
@ -99,6 +99,8 @@ void LevenbergMarquardtParams::print(const std::string& str) const {
|
|||
std::cout << " lambdaLowerBound: " << lambdaLowerBound << "\n";
|
||||
std::cout << " minModelFidelity: " << minModelFidelity << "\n";
|
||||
std::cout << " diagonalDamping: " << diagonalDamping << "\n";
|
||||
std::cout << " min_diagonal: " << min_diagonal_ << "\n";
|
||||
std::cout << " max_diagonal: " << max_diagonal_ << "\n";
|
||||
std::cout << " verbosityLM: "
|
||||
<< verbosityLMTranslator(verbosityLM) << "\n";
|
||||
std::cout.flush();
|
||||
|
|
|
|||
|
|
@ -0,0 +1,384 @@
|
|||
/**
|
||||
* @file ImplicitSchurFactor.h
|
||||
* @brief A new type of linear factor, implemented as a subclass of GaussianFactor
|
||||
* @author Frank Dellaert
|
||||
* @author Luca Carlone
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <boost/foreach.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <iostream>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* ImplicitSchurFactor
|
||||
*/
|
||||
template<size_t D> //
|
||||
class ImplicitSchurFactor: public GaussianFactor {
|
||||
|
||||
public:
|
||||
typedef ImplicitSchurFactor This; ///< Typedef to this class
|
||||
typedef JacobianFactor Base; ///< Typedef to base class
|
||||
typedef boost::shared_ptr<This> shared_ptr; ///< shared_ptr to this class
|
||||
|
||||
protected:
|
||||
|
||||
typedef Eigen::Matrix<double, 2, D> Matrix2D; ///< type of an F block
|
||||
typedef Eigen::Matrix<double, 2, 3> Matrix23;
|
||||
typedef Eigen::Matrix<double, D, D> MatrixDD; ///< camera hessian
|
||||
typedef std::pair<Key, Matrix2D> KeyMatrix2D; ///< named F block
|
||||
|
||||
std::vector<KeyMatrix2D> Fblocks_; ///< All 2*D F blocks (one for each camera)
|
||||
Matrix3 PointCovariance_; ///< the 3*3 matrix P = inv(E'E) (2*2 if degenerate)
|
||||
Matrix E_; ///< The 2m*3 E Jacobian with respect to the point
|
||||
Vector b_; ///< 2m-dimensional RHS vector
|
||||
|
||||
public:
|
||||
|
||||
/// Constructor
|
||||
ImplicitSchurFactor() {
|
||||
}
|
||||
|
||||
/// Construct from blocks of F, E, inv(E'*E), and RHS vector b
|
||||
ImplicitSchurFactor(const std::vector<KeyMatrix2D>& Fblocks, const Matrix& E,
|
||||
const Matrix3& P, const Vector& b) :
|
||||
Fblocks_(Fblocks), PointCovariance_(P), E_(E), b_(b) {
|
||||
initKeys();
|
||||
}
|
||||
|
||||
/// initialize keys from Fblocks
|
||||
void initKeys() {
|
||||
keys_.reserve(Fblocks_.size());
|
||||
BOOST_FOREACH(const KeyMatrix2D& it, Fblocks_)
|
||||
keys_.push_back(it.first);
|
||||
}
|
||||
|
||||
/// Destructor
|
||||
virtual ~ImplicitSchurFactor() {
|
||||
}
|
||||
|
||||
// Write access, only use for construction!
|
||||
|
||||
inline std::vector<KeyMatrix2D>& Fblocks() {
|
||||
return Fblocks_;
|
||||
}
|
||||
|
||||
inline Matrix3& PointCovariance() {
|
||||
return PointCovariance_;
|
||||
}
|
||||
|
||||
inline Matrix& E() {
|
||||
return E_;
|
||||
}
|
||||
|
||||
inline Vector& b() {
|
||||
return b_;
|
||||
}
|
||||
|
||||
/// Get matrix P
|
||||
inline const Matrix& getPointCovariance() const {
|
||||
return PointCovariance_;
|
||||
}
|
||||
|
||||
/// print
|
||||
void print(const std::string& s, const KeyFormatter& formatter) const {
|
||||
std::cout << " ImplicitSchurFactor " << std::endl;
|
||||
Factor::print(s);
|
||||
std::cout << " PointCovariance_ \n" << PointCovariance_ << std::endl;
|
||||
std::cout << " E_ \n" << E_ << std::endl;
|
||||
std::cout << " b_ \n" << b_.transpose() << std::endl;
|
||||
}
|
||||
|
||||
/// equals
|
||||
bool equals(const GaussianFactor& lf, double tol) const {
|
||||
if (!dynamic_cast<const ImplicitSchurFactor*>(&lf))
|
||||
return false;
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Degrees of freedom of camera
|
||||
virtual DenseIndex getDim(const_iterator variable) const {
|
||||
return D;
|
||||
}
|
||||
|
||||
virtual Matrix augmentedJacobian() const {
|
||||
throw std::runtime_error(
|
||||
"ImplicitSchurFactor::augmentedJacobian non implemented");
|
||||
return Matrix();
|
||||
}
|
||||
virtual std::pair<Matrix, Vector> jacobian() const {
|
||||
throw std::runtime_error("ImplicitSchurFactor::jacobian non implemented");
|
||||
return std::make_pair(Matrix(), Vector());
|
||||
}
|
||||
virtual Matrix augmentedInformation() const {
|
||||
throw std::runtime_error(
|
||||
"ImplicitSchurFactor::augmentedInformation non implemented");
|
||||
return Matrix();
|
||||
}
|
||||
virtual Matrix information() const {
|
||||
throw std::runtime_error(
|
||||
"ImplicitSchurFactor::information non implemented");
|
||||
return Matrix();
|
||||
}
|
||||
|
||||
/// Return the diagonal of the Hessian for this factor
|
||||
virtual VectorValues hessianDiagonal() const {
|
||||
// diag(Hessian) = diag(F' * (I - E * PointCov * E') * F);
|
||||
VectorValues d;
|
||||
|
||||
for (size_t pos = 0; pos < size(); ++pos) { // for each camera
|
||||
Key j = keys_[pos];
|
||||
|
||||
// Calculate Fj'*Ej for the current camera (observing a single point)
|
||||
// D x 3 = (D x 2) * (2 x 3)
|
||||
const Matrix2D& Fj = Fblocks_[pos].second;
|
||||
Eigen::Matrix<double, D, 3> FtE = Fj.transpose()
|
||||
* E_.block<2, 3>(2 * pos, 0);
|
||||
|
||||
Eigen::Matrix<double, D, 1> dj;
|
||||
for (size_t k = 0; k < D; ++k) { // for each diagonal element of the camera hessian
|
||||
// Vector column_k_Fj = Fj.col(k);
|
||||
dj(k) = Fj.col(k).squaredNorm(); // dot(column_k_Fj, column_k_Fj);
|
||||
// Vector column_k_FtE = FtE.row(k);
|
||||
// (1 x 1) = (1 x 3) * (3 * 3) * (3 x 1)
|
||||
dj(k) -= FtE.row(k) * PointCovariance_ * FtE.row(k).transpose();
|
||||
}
|
||||
d.insert(j, dj);
|
||||
}
|
||||
return d;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief add the contribution of this factor to the diagonal of the hessian
|
||||
* d(output) = d(input) + deltaHessianFactor
|
||||
*/
|
||||
void hessianDiagonal(double* d) const {
|
||||
// diag(Hessian) = diag(F' * (I - E * PointCov * E') * F);
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, D, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
|
||||
for (size_t pos = 0; pos < size(); ++pos) { // for each camera in the factor
|
||||
Key j = keys_[pos];
|
||||
|
||||
// Calculate Fj'*Ej for the current camera (observing a single point)
|
||||
// D x 3 = (D x 2) * (2 x 3)
|
||||
const Matrix2D& Fj = Fblocks_[pos].second;
|
||||
Eigen::Matrix<double, D, 3> FtE = Fj.transpose()
|
||||
* E_.block<2, 3>(2 * pos, 0);
|
||||
|
||||
DVector dj;
|
||||
for (size_t k = 0; k < D; ++k) { // for each diagonal element of the camera hessian
|
||||
dj(k) = Fj.col(k).squaredNorm();
|
||||
// (1 x 1) = (1 x 3) * (3 * 3) * (3 x 1)
|
||||
dj(k) -= FtE.row(k) * PointCovariance_ * FtE.row(k).transpose();
|
||||
}
|
||||
DMap(d + D * j) += dj;
|
||||
}
|
||||
}
|
||||
|
||||
/// Return the block diagonal of the Hessian for this factor
|
||||
virtual std::map<Key, Matrix> hessianBlockDiagonal() const {
|
||||
std::map<Key, Matrix> blocks;
|
||||
for (size_t pos = 0; pos < size(); ++pos) {
|
||||
Key j = keys_[pos];
|
||||
const Matrix2D& Fj = Fblocks_[pos].second;
|
||||
// F'*F - F'*E*P*E'*F (9*2)*(2*9) - (9*2)*(2*3)*(3*3)*(3*2)*(2*9)
|
||||
Eigen::Matrix<double, D, 3> FtE = Fj.transpose()
|
||||
* E_.block<2, 3>(2 * pos, 0);
|
||||
blocks[j] = Fj.transpose() * Fj
|
||||
- FtE * PointCovariance_ * FtE.transpose();
|
||||
// F'*(I - E*P*E')*F, TODO: this should work, but it does not :-(
|
||||
// static const Eigen::Matrix<double, 2, 2> I2 = eye(2);
|
||||
// Eigen::Matrix<double, 2, 2> Q = //
|
||||
// I2 - E_.block<2, 3>(2 * pos, 0) * PointCovariance_ * E_.block<2, 3>(2 * pos, 0).transpose();
|
||||
// blocks[j] = Fj.transpose() * Q * Fj;
|
||||
}
|
||||
return blocks;
|
||||
}
|
||||
|
||||
virtual GaussianFactor::shared_ptr clone() const {
|
||||
return boost::make_shared<ImplicitSchurFactor<D> >(Fblocks_,
|
||||
E_, PointCovariance_, b_); // constructor order is (Fblocks, E, P, b)
|
||||
throw std::runtime_error("ImplicitSchurFactor::clone non implemented");
|
||||
}
|
||||
virtual bool empty() const {
|
||||
return false;
|
||||
}
|
||||
|
||||
virtual GaussianFactor::shared_ptr negate() const {
|
||||
return boost::make_shared<ImplicitSchurFactor<D> >(Fblocks_,
|
||||
E_, PointCovariance_, b_); // constructor order is (Fblocks, E, P, b)
|
||||
throw std::runtime_error("ImplicitSchurFactor::negate non implemented");
|
||||
}
|
||||
|
||||
// Raw Vector version of y += F'*alpha*(I - E*P*E')*F*x, for testing
|
||||
static
|
||||
void multiplyHessianAdd(const Matrix& F, const Matrix& E,
|
||||
const Matrix& PointCovariance, double alpha, const Vector& x, Vector& y) {
|
||||
Vector e1 = F * x;
|
||||
Vector d1 = E.transpose() * e1;
|
||||
Vector d2 = PointCovariance * d1;
|
||||
Vector e2 = E * d2;
|
||||
Vector e3 = alpha * (e1 - e2);
|
||||
y += F.transpose() * e3;
|
||||
}
|
||||
|
||||
typedef std::vector<Vector2> Error2s;
|
||||
|
||||
/**
|
||||
* @brief Calculate corrected error Q*e = (I - E*P*E')*e
|
||||
*/
|
||||
void projectError(const Error2s& e1, Error2s& e2) const {
|
||||
|
||||
// d1 = E.transpose() * e1 = (3*2m)*2m
|
||||
Vector3 d1;
|
||||
d1.setZero();
|
||||
for (size_t k = 0; k < size(); k++)
|
||||
d1 += E_.block < 2, 3 > (2 * k, 0).transpose() * e1[k];
|
||||
|
||||
// d2 = P * d1 = (3*3)*3
|
||||
Vector3 d2 = PointCovariance_ * d1;
|
||||
|
||||
// e2 = e1 - E*d2 = 2m - (2m*3)*3
|
||||
for (size_t k = 0; k < size(); k++)
|
||||
e2[k] = e1[k] - E_.block < 2, 3 > (2 * k, 0) * d2;
|
||||
}
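A dense reference implementation of the projection (illustration only, useful e.g. in unit tests): stacking the per-camera 2-vectors into a single 2m-vector, Q*e = (I - E*P*E')*e can be evaluated right-to-left without ever forming the 2m x 2m matrix Q:

#include <Eigen/Dense>

// E is 2m x 3, P is the 3x3 point covariance, e is the stacked 2m-vector.
Eigen::VectorXd projectErrorDense(const Eigen::MatrixXd& E,
                                  const Eigen::Matrix3d& P,
                                  const Eigen::VectorXd& e) {
  return e - E * (P * (E.transpose() * e));
}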
|
||||
|
||||
/// needed to be GaussianFactor - (I - E*P*E')*(F*x - b)
|
||||
virtual double error(const VectorValues& x) const {
|
||||
|
||||
// resize does not do malloc if correct size
|
||||
e1.resize(size());
|
||||
e2.resize(size());
|
||||
|
||||
// e1 = F * x - b = (2m*dm)*dm
|
||||
for (size_t k = 0; k < size(); ++k)
|
||||
e1[k] = Fblocks_[k].second * x.at(keys_[k]) - b_.segment < 2 > (k * 2);
|
||||
projectError(e1, e2);
|
||||
|
||||
double result = 0;
|
||||
for (size_t k = 0; k < size(); ++k)
|
||||
result += dot(e2[k], e2[k]);
|
||||
return 0.5 * result;
|
||||
}
|
||||
|
||||
/// Scratch space for multiplyHessianAdd
|
||||
mutable Error2s e1, e2;
|
||||
|
||||
/**
|
||||
* @brief double* Hessian-vector multiply, i.e. y += F'*alpha*(I - E*P*E')*F*x
|
||||
* RAW memory access! Assumes keys start at 0 and go to M-1, and x and y are laid out that way
|
||||
*/
|
||||
void multiplyHessianAdd(double alpha, const double* x, double* y) const {
|
||||
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, D, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
typedef Eigen::Map<const DVector> ConstDMap;
|
||||
|
||||
// resize does not do malloc if correct size
|
||||
e1.resize(size());
|
||||
e2.resize(size());
|
||||
|
||||
// e1 = F * x = (2m*dm)*dm
|
||||
size_t k = 0;
|
||||
BOOST_FOREACH(const KeyMatrix2D& it, Fblocks_) {
|
||||
Key key = it.first;
|
||||
e1[k++] = it.second * ConstDMap(x + D * key);
|
||||
}
|
||||
|
||||
projectError(e1, e2);
|
||||
|
||||
// y += F.transpose()*e2 = (2d*2m)*2m
|
||||
k = 0;
|
||||
BOOST_FOREACH(const KeyMatrix2D& it, Fblocks_) {
|
||||
Key key = it.first;
|
||||
DMap(y + D * key) += it.second.transpose() * alpha * e2[k++];
|
||||
}
|
||||
}
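A minimal usage sketch for the raw-memory overload above (illustration only; as the comment states, it assumes M cameras with keys 0..M-1 and D parameters each, with x and y laid out as flat buffers of M consecutive D-vectors):

#include <cstddef>
#include <vector>

template<size_t D>
void applySchurHessian(const gtsam::ImplicitSchurFactor<D>& factor,
                       const std::vector<double>& x, std::vector<double>& y) {
  // x and y hold one D-vector per camera, keyed 0..M-1; y accumulates
  // y += F'*(I - E*P*E')*F*x for this factor.
  factor.multiplyHessianAdd(1.0, &x[0], &y[0]);
}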
|
||||
|
||||
void multiplyHessianAdd(double alpha, const double* x, double* y,
|
||||
std::vector<size_t> keys) const {
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Hessian-vector multiply, i.e. y += F'*alpha*(I - E*P*E')*F*x
|
||||
*/
|
||||
void multiplyHessianAdd(double alpha, const VectorValues& x,
|
||||
VectorValues& y) const {
|
||||
|
||||
// resize does not do malloc if correct size
|
||||
e1.resize(size());
|
||||
e2.resize(size());
|
||||
|
||||
// e1 = F * x = (2m*dm)*dm
|
||||
for (size_t k = 0; k < size(); ++k)
|
||||
e1[k] = Fblocks_[k].second * x.at(keys_[k]);
|
||||
|
||||
projectError(e1, e2);
|
||||
|
||||
// y += F.transpose()*e2 = (2d*2m)*2m
|
||||
for (size_t k = 0; k < size(); ++k) {
|
||||
Key key = keys_[k];
|
||||
static const Vector empty;
|
||||
std::pair<VectorValues::iterator, bool> it = y.tryInsert(key, empty);
|
||||
Vector& yi = it.first->second;
|
||||
// Create the value as a zero vector if it does not exist.
|
||||
if (it.second)
|
||||
yi = Vector::Zero(Fblocks_[k].second.cols());
|
||||
yi += Fblocks_[k].second.transpose() * alpha * e2[k];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Dummy version to measure overhead of key access
|
||||
*/
|
||||
void multiplyHessianDummy(double alpha, const VectorValues& x,
|
||||
VectorValues& y) const {
|
||||
|
||||
BOOST_FOREACH(const KeyMatrix2D& Fi, Fblocks_) {
|
||||
static const Vector empty;
|
||||
Key key = Fi.first;
|
||||
std::pair<VectorValues::iterator, bool> it = y.tryInsert(key, empty);
|
||||
Vector& yi = it.first->second;
|
||||
yi = x.at(key);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate gradient, which is -F'Q*b, see paper
|
||||
*/
|
||||
VectorValues gradientAtZero() const {
|
||||
// calculate Q*b
|
||||
e1.resize(size());
|
||||
e2.resize(size());
|
||||
for (size_t k = 0; k < size(); k++)
|
||||
e1[k] = b_.segment < 2 > (2 * k);
|
||||
projectError(e1, e2);
|
||||
|
||||
// g = F.transpose()*e2
|
||||
VectorValues g;
|
||||
for (size_t k = 0; k < size(); ++k) {
|
||||
Key key = keys_[k];
|
||||
g.insert(key, -Fblocks_[k].second.transpose() * e2[k]);
|
||||
}
|
||||
|
||||
// return it
|
||||
return g;
|
||||
}
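For reference, a one-line derivation of the comment above (not part of the commit): with P = inv(E'E), the projector Q = I - E*P*E' is symmetric and idempotent (Q'Q = Q), so the gradient of the error 0.5*|Q*(F*x - b)|^2 at x = 0 is F'*Q'*Q*(F*0 - b) = -F'*Q*b, which is exactly what the loop above assembles camera-by-camera.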
|
||||
|
||||
};
|
||||
// ImplicitSchurFactor
|
||||
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
/*
|
||||
* @file JacobianFactorQ.h
|
||||
* @date Oct 27, 2013
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "JacobianSchurFactor.h"
|
||||
|
||||
namespace gtsam {
|
||||
/**
|
||||
* JacobianFactor for Schur complement that uses Q noise model
|
||||
*/
|
||||
template<size_t D>
|
||||
class JacobianFactorQ: public JacobianSchurFactor<D> {
|
||||
|
||||
typedef JacobianSchurFactor<D> Base;
|
||||
|
||||
public:
|
||||
|
||||
/// Default constructor
|
||||
JacobianFactorQ() {
|
||||
}
|
||||
|
||||
/// Constructor
|
||||
JacobianFactorQ(const std::vector<typename Base::KeyMatrix2D>& Fblocks,
|
||||
const Matrix& E, const Matrix3& P, const Vector& b,
|
||||
const SharedDiagonal& model = SharedDiagonal()) :
|
||||
JacobianSchurFactor<D>() {
|
||||
size_t j = 0, m2 = E.rows(), m = m2 / 2;
|
||||
// Calculate projector Q
|
||||
Matrix Q = eye(m2) - E * P * E.transpose();
|
||||
// Calculate pre-computed Jacobian matrices
|
||||
// TODO: can we do better ?
|
||||
typedef std::pair<Key, Matrix> KeyMatrix;
|
||||
std::vector < KeyMatrix > QF;
|
||||
QF.reserve(m);
|
||||
// Below, we compute each 2m*D block A_j = Q_j * F_j = (2m*2) * (2*D)
|
||||
BOOST_FOREACH(const typename Base::KeyMatrix2D& it, Fblocks)
|
||||
QF.push_back(KeyMatrix(it.first, Q.block(0, 2 * j++, m2, 2) * it.second));
|
||||
// Which is then passed to the normal JacobianFactor constructor
|
||||
JacobianFactor::fillTerms(QF, Q * b, model);
|
||||
}
|
||||
};
|
||||
|
||||
}
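// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): the projector built in the
// constructor above, Q = I - E * P * E' with P = inv(E'E), annihilates E,
// i.e. Q * E == 0 (numerically), which is what removes the point update from
// each block row.  Eigen-only check (assumes <Eigen/Dense>), m = 2 cameras:
namespace example {
inline void jacobianFactorQProjectorSketch() {
  Eigen::MatrixXd E = Eigen::MatrixXd::Random(4, 3);             // 2m x 3
  Eigen::Matrix3d P = (E.transpose() * E).inverse();             // point covariance
  Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(4, 4) - E * P * E.transpose();
  double shouldBeZero = (Q * E).norm();                          // ~1e-15
  (void)shouldBeZero;
}
} // namespace example
// ---------------------------------------------------------------------------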
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
* @file JacobianFactorQR.h
|
||||
* @brief JacobianFactor that combines and eliminates points
|
||||
* @date Oct 27, 2013
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include <gtsam_unstable/slam/JacobianSchurFactor.h>
|
||||
|
||||
namespace gtsam {
|
||||
/**
|
||||
* JacobianFactor for the Schur complement, obtained by eliminating the point with QR
|
||||
*/
|
||||
template<size_t D>
|
||||
class JacobianFactorQR: public JacobianSchurFactor<D> {
|
||||
|
||||
typedef JacobianSchurFactor<D> Base;
|
||||
|
||||
public:
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*/
|
||||
JacobianFactorQR(const std::vector<typename Base::KeyMatrix2D>& Fblocks,
|
||||
const Matrix& E, const Matrix3& P, const Vector& b,
|
||||
const SharedDiagonal& model = SharedDiagonal()) :
|
||||
JacobianSchurFactor<D>() {
|
||||
// Create a number of Jacobian factors in a factor graph
|
||||
GaussianFactorGraph gfg;
|
||||
Symbol pointKey('p', 0);
|
||||
size_t i = 0;
|
||||
BOOST_FOREACH(const typename Base::KeyMatrix2D& it, Fblocks) {
|
||||
gfg.add(pointKey, E.block<2, 3>(2 * i, 0), it.first, it.second,
|
||||
b.segment<2>(2 * i), model);
|
||||
i += 1;
|
||||
}
|
||||
//gfg.print("gfg");
|
||||
|
||||
// eliminate the point
|
||||
GaussianBayesNet::shared_ptr bn;
|
||||
GaussianFactorGraph::shared_ptr fg;
|
||||
std::vector < Key > variables;
|
||||
variables.push_back(pointKey);
|
||||
boost::tie(bn, fg) = gfg.eliminatePartialSequential(variables, EliminateQR);
|
||||
//fg->print("fg");
|
||||
|
||||
JacobianFactor::operator=(JacobianFactor(*fg));
|
||||
}
|
||||
};
|
||||
// class
|
||||
|
||||
}// gtsam
|
||||
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* @file JacobianFactorSVD.h
|
||||
* @date Oct 27, 2013
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "gtsam_unstable/slam/JacobianSchurFactor.h"
|
||||
|
||||
namespace gtsam {
|
||||
/**
|
||||
* JacobianFactor for the Schur complement that eliminates the point with the SVD null-space trick
|
||||
*/
|
||||
template<size_t D>
|
||||
class JacobianFactorSVD: public JacobianSchurFactor<D> {
|
||||
|
||||
public:
|
||||
|
||||
typedef Eigen::Matrix<double, 2, D> Matrix2D;
|
||||
typedef std::pair<Key, Matrix2D> KeyMatrix2D;
|
||||
|
||||
/// Default constructor
|
||||
JacobianFactorSVD() {}
|
||||
|
||||
/// Constructor
|
||||
JacobianFactorSVD(const std::vector<KeyMatrix2D>& Fblocks, const Matrix& Enull, const Vector& b,
|
||||
const SharedDiagonal& model = SharedDiagonal()) : JacobianSchurFactor<D>() {
|
||||
size_t numKeys = Enull.rows() / 2;
|
||||
size_t j = 0, m2 = 2*numKeys-3;
|
||||
// PLAIN NULL SPACE TRICK
|
||||
// Matrix Q = Enull * Enull.transpose();
|
||||
// BOOST_FOREACH(const KeyMatrix2D& it, Fblocks)
|
||||
// QF.push_back(KeyMatrix(it.first, Q.block(0, 2 * j++, m2, 2) * it.second));
|
||||
// JacobianFactor factor(QF, Q * b);
|
||||
typedef std::pair<Key, Matrix> KeyMatrix;
|
||||
std::vector<KeyMatrix> QF;
|
||||
QF.reserve(numKeys);
|
||||
BOOST_FOREACH(const KeyMatrix2D& it, Fblocks)
|
||||
QF.push_back(KeyMatrix(it.first, (Enull.transpose()).block(0, 2 * j++, m2, 2) * it.second));
|
||||
JacobianFactor::fillTerms(QF, Enull.transpose() * b, model);
|
||||
}
|
||||
};
|
||||
|
||||
}
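// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): the "null space trick" used above.
// Enull holds the last 2m-3 left singular vectors of E, so Enull' * E == 0
// (numerically), and multiplying the whitened error F*dx + E*dp - b by Enull'
// removes the point update dp.  Eigen-only illustration (assumes
// <Eigen/Dense>), m = 3 cameras:
namespace example {
inline void svdNullSpaceSketch() {
  const int m = 3;
  Eigen::MatrixXd E = Eigen::MatrixXd::Random(2 * m, 3);           // 2m x 3
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(E, Eigen::ComputeFullU);
  Eigen::MatrixXd Enull = svd.matrixU().rightCols(2 * m - 3);      // 2m x (2m-3)
  double shouldBeZero = (Enull.transpose() * E).norm();            // ~1e-15
  (void)shouldBeZero;
}
} // namespace example
// ---------------------------------------------------------------------------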
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
/*
|
||||
* @file JacobianSchurFactor.h
|
||||
* @brief JacobianFactor that combines and eliminates points
|
||||
* @date Oct 27, 2013
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/linear/VectorValues.h>
|
||||
#include <gtsam/linear/JacobianFactor.h>
|
||||
#include <gtsam/linear/GaussianBayesNet.h>
|
||||
#include <gtsam/linear/GaussianFactorGraph.h>
|
||||
#include <boost/foreach.hpp>
|
||||
|
||||
namespace gtsam {
|
||||
/**
|
||||
* Base class for JacobianFactors used in the Schur complement, with raw-memory access
|
||||
*/
|
||||
template<size_t D>
|
||||
class JacobianSchurFactor: public JacobianFactor {
|
||||
|
||||
public:
|
||||
|
||||
typedef Eigen::Matrix<double, 2, D> Matrix2D;
|
||||
typedef std::pair<Key, Matrix2D> KeyMatrix2D;
|
||||
|
||||
// Use eigen magic to access raw memory
|
||||
typedef Eigen::Matrix<double, D, 1> DVector;
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
typedef Eigen::Map<const DVector> ConstDMap;
|
||||
|
||||
/**
|
||||
* @brief double* Matrix-vector multiply, i.e. y = A*x
|
||||
* RAW memory access! Assumes keys start at 0 and go to M-1, and x is laid out that way
|
||||
*/
|
||||
Vector operator*(const double* x) const {
|
||||
Vector Ax = zero(Ab_.rows());
|
||||
if (empty()) return Ax;
|
||||
|
||||
// Just iterate over all A matrices and multiply in correct config part
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
Ax += Ab_(pos) * ConstDMap(x + D * keys_[pos]);
|
||||
|
||||
return model_ ? model_->whiten(Ax) : Ax;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief double* Transpose Matrix-vector multiply, i.e. x += A'*e
|
||||
* RAW memory access! Assumes keys start at 0 and go to M-1, and y is laid out that way
|
||||
*/
|
||||
void transposeMultiplyAdd(double alpha, const Vector& e, double* x) const
|
||||
{
|
||||
Vector E = alpha * (model_ ? model_->whiten(e) : e);
|
||||
// Just iterate over all A matrices and insert Ai^e into y
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
DMap(x + D * keys_[pos]) += Ab_(pos).transpose() * E;
|
||||
}
|
||||
|
||||
/** y += alpha * A'*A*x */
|
||||
void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const {
|
||||
JacobianFactor::multiplyHessianAdd(alpha,x,y);
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief double* Hessian-vector multiply, i.e. y += A'*(A*x)
|
||||
* RAW memory access! Assumes keys start at 0 and go to M-1, and x and and y are laid out that way
|
||||
*/
|
||||
void multiplyHessianAdd(double alpha, const double* x, double* y) const {
|
||||
// Vector Ax = (*this)*x;
|
||||
// this->transposeMultiplyAdd(alpha,Ax,y);
|
||||
if (empty()) return;
|
||||
Vector Ax = zero(Ab_.rows());
|
||||
|
||||
// Just iterate over all A matrices and multiply in correct config part
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
Ax += Ab_(pos) * ConstDMap(x + D * keys_[pos]);
|
||||
|
||||
// Deal with noise properly, need to Double* whiten as we are dividing by variance
|
||||
if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
|
||||
|
||||
// multiply with alpha
|
||||
Ax *= alpha;
|
||||
|
||||
// Again iterate over all A matrices and insert Ai^e into y
|
||||
for(size_t pos=0; pos<size(); ++pos)
|
||||
DMap(y + D * keys_[pos]) += Ab_(pos).transpose() * Ax;
|
||||
}
|
||||
|
||||
}; // class
|
||||
|
||||
} // gtsam
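// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): why multiplyHessianAdd above
// whitens Ax twice.  The Hessian-vector product is A' * Sigma^{-1} * A * x,
// and a single whitening applies Sigma^{-1/2}; applying it twice to A*x
// therefore yields Sigma^{-1} * A * x before the final multiplication by A'.
// With an isotropic sigma this reduces to dividing by the variance:
namespace example {
inline void doubleWhitenSketch() {
  const double sigma = 0.5;
  Eigen::Vector2d Ax(1.0, 2.0);
  Eigen::Vector2d once = Ax / sigma;     // Sigma^{-1/2} * A * x
  Eigen::Vector2d twice = once / sigma;  // Sigma^{-1}   * A * x  (== Ax / sigma^2)
  (void)twice;
}
} // namespace example
// ---------------------------------------------------------------------------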
|
||||
|
|
@ -0,0 +1,101 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file RegularHessianFactor.h
|
||||
* @brief HessianFactor class with constant sized blocks
|
||||
* @author Richard Roberts
|
||||
* @date Dec 8, 2010
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <gtsam/linear/HessianFactor.h>
|
||||
#include <boost/foreach.hpp>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
template<size_t D>
|
||||
class RegularHessianFactor: public HessianFactor {
|
||||
|
||||
private:
|
||||
|
||||
typedef Eigen::Matrix<double, D, D> MatrixDD; // camera hessian block
|
||||
typedef Eigen::Matrix<double, D, 1> VectorD;
|
||||
|
||||
public:
|
||||
|
||||
/** Construct an n-way factor. Gs contains the upper-triangle blocks of the
|
||||
* quadratic term (the Hessian matrix) provided in row-order, gs the pieces
|
||||
* of the linear vector term, and f the constant term.
|
||||
*/
|
||||
RegularHessianFactor(const std::vector<Key>& js,
|
||||
const std::vector<Matrix>& Gs, const std::vector<Vector>& gs, double f) :
|
||||
HessianFactor(js, Gs, gs, f) {
|
||||
}
|
||||
|
||||
/** Constructor with an arbitrary number of keys and with the augmented information matrix
|
||||
* specified as a block matrix. */
|
||||
template<typename KEYS>
|
||||
RegularHessianFactor(const KEYS& keys,
|
||||
const SymmetricBlockMatrix& augmentedInformation) :
|
||||
HessianFactor(keys, augmentedInformation) {
|
||||
}
|
||||
|
||||
/** y += alpha * A'*A*x */
|
||||
void multiplyHessianAdd(double alpha, const VectorValues& x,
|
||||
VectorValues& y) const {
|
||||
HessianFactor::multiplyHessianAdd(alpha, x, y);
|
||||
}
|
||||
|
||||
// Scratch space for multiplyHessianAdd
|
||||
typedef Eigen::Matrix<double, D, 1> DVector;
|
||||
mutable std::vector<DVector> y;
|
||||
|
||||
void multiplyHessianAdd(double alpha, const double* x,
|
||||
double* yvalues) const {
|
||||
// Create a vector of temporary y values, corresponding to rows i
|
||||
y.resize(size());
|
||||
BOOST_FOREACH(DVector & yi, y)
|
||||
yi.setZero();
|
||||
|
||||
typedef Eigen::Map<DVector> DMap;
|
||||
typedef Eigen::Map<const DVector> ConstDMap;
|
||||
|
||||
// Accessing the VectorValues one by one is expensive
|
||||
// So we will loop over columns to access x only once per column
|
||||
// And fill the above temporary y values, to be added into yvalues after
|
||||
|
||||
for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
|
||||
Key key = keys_[j];
|
||||
const double* xj = x + key * D;
|
||||
DenseIndex i = 0;
|
||||
for (; i < j; ++i)
|
||||
y[i] += info_(i, j).knownOffDiagonal() * ConstDMap(xj);
|
||||
// blocks on the diagonal are only half
|
||||
y[i] += info_(j, j).selfadjointView() * ConstDMap(xj);
|
||||
// for below diagonal, we take transpose block from upper triangular part
|
||||
for (i = j + 1; i < (DenseIndex) size(); ++i)
|
||||
y[i] += info_(i, j).knownOffDiagonal() * ConstDMap(xj);
|
||||
}
|
||||
|
||||
// copy to yvalues
|
||||
for (DenseIndex i = 0; i < (DenseIndex) size(); ++i) {
|
||||
Key key = keys_[i];
|
||||
DMap(yvalues + key * D) += alpha * y[i];
|
||||
}
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
}
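// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): multiplyHessianAdd above stores
// only the upper-triangular blocks of the Hessian and reuses their transposes
// for the lower triangle.  A dense Eigen analogue with two 2x2 blocks H00,
// H01, H11 (so H10 = H01'):
namespace example {
inline void upperTriangularMultiplySketch() {
  Eigen::Matrix2d H00 = Eigen::Matrix2d::Identity();
  Eigen::Matrix2d H11 = 2.0 * Eigen::Matrix2d::Identity();
  Eigen::Matrix2d H01; H01 << 0, 1, 1, 0;
  Eigen::Vector2d x0(1, 0), x1(0, 1);
  Eigen::Vector2d y0 = H00 * x0 + H01 * x1;              // block row 0
  Eigen::Vector2d y1 = H01.transpose() * x0 + H11 * x1;  // block row 1 uses the transposed off-diagonal
  (void)y0; (void)y1;
}
} // namespace example
// ---------------------------------------------------------------------------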
|
||||
|
||||
|
|
@ -0,0 +1,644 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SmartFactorBase.h
|
||||
* @brief Base class to create smart factors on poses or cameras
|
||||
* @author Luca Carlone
|
||||
* @author Zsolt Kira
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "JacobianFactorQ.h"
|
||||
#include "ImplicitSchurFactor.h"
|
||||
#include "RegularHessianFactor.h"
|
||||
|
||||
#include <gtsam/nonlinear/NonlinearFactor.h>
|
||||
#include <gtsam/geometry/PinholeCamera.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
/// Base class with no internal point, completely functional
|
||||
template<class POSE, class CALIBRATION, size_t D>
|
||||
class SmartFactorBase: public NonlinearFactor {
|
||||
protected:
|
||||
|
||||
// Keep a copy of measurement and calibration for I/O
|
||||
std::vector<Point2> measured_; ///< 2D measurement for each of the m views
|
||||
std::vector<SharedNoiseModel> noise_; ///< noise model used
|
||||
///< (important that the order is the same as the keys that we use to create the factor)
|
||||
|
||||
boost::optional<POSE> body_P_sensor_; ///< The pose of the sensor in the body frame (one for all cameras)
|
||||
|
||||
/// Definitions for blocks of F
|
||||
typedef Eigen::Matrix<double, 2, D> Matrix2D; // F
|
||||
typedef Eigen::Matrix<double, D, 2> MatrixD2; // F'
|
||||
typedef std::pair<Key, Matrix2D> KeyMatrix2D; // Fblocks
|
||||
typedef Eigen::Matrix<double, D, D> MatrixDD; // camera hessian block
|
||||
typedef Eigen::Matrix<double, 2, 3> Matrix23;
|
||||
typedef Eigen::Matrix<double, D, 1> VectorD;
|
||||
typedef Eigen::Matrix<double, 2, 2> Matrix2;
|
||||
|
||||
/// shorthand for base class type
|
||||
typedef NonlinearFactor Base;
|
||||
|
||||
/// shorthand for this class
|
||||
typedef SmartFactorBase<POSE, CALIBRATION, D> This;
|
||||
|
||||
public:
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
||||
/// shorthand for a pinhole camera
|
||||
typedef PinholeCamera<CALIBRATION> Camera;
|
||||
typedef std::vector<Camera> Cameras;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param body_P_sensor is the transform from body to sensor frame (default identity)
|
||||
*/
|
||||
SmartFactorBase(boost::optional<POSE> body_P_sensor = boost::none) :
|
||||
body_P_sensor_(body_P_sensor) {
|
||||
}
|
||||
|
||||
/** Virtual destructor */
|
||||
virtual ~SmartFactorBase() {
|
||||
}
|
||||
|
||||
/**
|
||||
* add a new measurement and pose key
|
||||
* @param measured_i is the 2D projection of the landmark in this view
|
||||
* @param poseKey is the index corresponding to the camera observing the landmark
|
||||
* @param noise_i is the measurement noise
|
||||
*/
|
||||
void add(const Point2& measured_i, const Key& poseKey_i,
|
||||
const SharedNoiseModel& noise_i) {
|
||||
this->measured_.push_back(measured_i);
|
||||
this->keys_.push_back(poseKey_i);
|
||||
this->noise_.push_back(noise_i);
|
||||
}
|
||||
|
||||
/**
|
||||
* variant of the previous add: adds a bunch of measurements, together with the camera keys and noises
|
||||
*/
|
||||
// ****************************************************************************************************
|
||||
void add(std::vector<Point2>& measurements, std::vector<Key>& poseKeys,
|
||||
std::vector<SharedNoiseModel>& noises) {
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
this->measured_.push_back(measurements.at(i));
|
||||
this->keys_.push_back(poseKeys.at(i));
|
||||
this->noise_.push_back(noises.at(i));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* variant of the previous add: adds a bunch of measurements and uses the same noise model for all of them
|
||||
*/
|
||||
// ****************************************************************************************************
|
||||
void add(std::vector<Point2>& measurements, std::vector<Key>& poseKeys,
|
||||
const SharedNoiseModel& noise) {
|
||||
for (size_t i = 0; i < measurements.size(); i++) {
|
||||
this->measured_.push_back(measurements.at(i));
|
||||
this->keys_.push_back(poseKeys.at(i));
|
||||
this->noise_.push_back(noise);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds an entire SfM_track (collection of cameras observing a single point).
|
||||
* The noise is assumed to be the same for all measurements
|
||||
*/
|
||||
// ****************************************************************************************************
|
||||
void add(const SfM_Track& trackToAdd, const SharedNoiseModel& noise) {
|
||||
for (size_t k = 0; k < trackToAdd.number_measurements(); k++) {
|
||||
this->measured_.push_back(trackToAdd.measurements[k].second);
|
||||
this->keys_.push_back(trackToAdd.measurements[k].first);
|
||||
this->noise_.push_back(noise);
|
||||
}
|
||||
}
|
||||
|
||||
/** return the measurements */
|
||||
const std::vector<Point2>& measured() const {
|
||||
return measured_;
|
||||
}
|
||||
|
||||
/** return the noise model */
|
||||
const std::vector<SharedNoiseModel>& noise() const {
|
||||
return noise_;
|
||||
}
|
||||
|
||||
/**
|
||||
* print
|
||||
* @param s optional string naming the factor
|
||||
* @param keyFormatter optional formatter useful for printing Symbols
|
||||
*/
|
||||
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
|
||||
DefaultKeyFormatter) const {
|
||||
std::cout << s << "SmartFactorBase, z = \n";
|
||||
for (size_t k = 0; k < measured_.size(); ++k) {
|
||||
std::cout << "measurement, p = " << measured_[k] << "\t";
|
||||
noise_[k]->print("noise model = ");
|
||||
}
|
||||
if (this->body_P_sensor_)
|
||||
this->body_P_sensor_->print(" sensor pose in body frame: ");
|
||||
Base::print("", keyFormatter);
|
||||
}
|
||||
|
||||
/// equals
|
||||
virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const {
|
||||
const This *e = dynamic_cast<const This*>(&p);
|
||||
|
||||
bool areMeasurementsEqual = true;
|
||||
for (size_t i = 0; e && i < measured_.size(); i++) {
|
||||
if (this->measured_.at(i).equals(e->measured_.at(i), tol) == false) {
|
||||
areMeasurementsEqual = false;
|
||||
break;
}
|
||||
}
|
||||
return e && Base::equals(p, tol) && areMeasurementsEqual
|
||||
&& ((!body_P_sensor_ && !e->body_P_sensor_)
|
||||
|| (body_P_sensor_ && e->body_P_sensor_
|
||||
&& body_P_sensor_->equals(*e->body_P_sensor_)));
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// Calculate vector of re-projection errors, before applying noise model
|
||||
Vector reprojectionError(const Cameras& cameras, const Point3& point) const {
|
||||
|
||||
Vector b = zero(2 * cameras.size());
|
||||
|
||||
size_t i = 0;
|
||||
BOOST_FOREACH(const Camera& camera, cameras) {
|
||||
const Point2& zi = this->measured_.at(i);
|
||||
try {
|
||||
Point2 e(camera.project(point) - zi);
|
||||
b[2 * i] = e.x();
|
||||
b[2 * i + 1] = e.y();
|
||||
} catch (CheiralityException& e) {
|
||||
std::cout << "Cheirality exception " << std::endl;
|
||||
exit (EXIT_FAILURE);
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/**
|
||||
* Calculate the error of the factor.
|
||||
* This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian.
|
||||
* In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model
|
||||
* to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5.
|
||||
* This is different from reprojectionError(cameras,point) as each point is whitened
|
||||
*/
|
||||
double totalReprojectionError(const Cameras& cameras,
|
||||
const Point3& point) const {
|
||||
|
||||
double overallError = 0;
|
||||
|
||||
size_t i = 0;
|
||||
BOOST_FOREACH(const Camera& camera, cameras) {
|
||||
const Point2& zi = this->measured_.at(i);
|
||||
try {
|
||||
Point2 reprojectionError(camera.project(point) - zi);
|
||||
overallError += 0.5
|
||||
* this->noise_.at(i)->distance(reprojectionError.vector());
|
||||
} catch (CheiralityException& e) {
|
||||
std::cout << "Cheirality exception " << std::endl;
|
||||
exit (EXIT_FAILURE);
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
return overallError;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// Assumes non-degenerate !
|
||||
void computeEP(Matrix& E, Matrix& PointCov, const Cameras& cameras,
|
||||
const Point3& point) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
E = zeros(2 * numKeys, 3);
|
||||
Vector b = zero(2 * numKeys);
|
||||
|
||||
Matrix Ei(2, 3);
|
||||
for (size_t i = 0; i < this->measured_.size(); i++) {
|
||||
try {
|
||||
cameras[i].project(point, boost::none, Ei);
|
||||
} catch (CheiralityException& e) {
|
||||
std::cout << "Cheirality exception " << std::endl;
|
||||
exit (EXIT_FAILURE);
|
||||
}
|
||||
this->noise_.at(i)->WhitenSystem(Ei, b);
|
||||
E.block<2, 3>(2 * i, 0) = Ei;
|
||||
}
|
||||
|
||||
// Matrix PointCov;
|
||||
PointCov.noalias() = (E.transpose() * E).inverse();
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// Compute F, E only (called below in both vanilla and SVD versions)
|
||||
/// Given a Point3, assumes dimensionality is 3
|
||||
double computeJacobians(std::vector<KeyMatrix2D>& Fblocks, Matrix& E,
|
||||
Vector& b, const Cameras& cameras, const Point3& point) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
E = zeros(2 * numKeys, 3);
|
||||
b = zero(2 * numKeys);
|
||||
double f = 0;
|
||||
|
||||
Matrix Fi(2, 6), Ei(2, 3), Hcali(2, D - 6), Hcam(2, D);
|
||||
for (size_t i = 0; i < this->measured_.size(); i++) {
|
||||
|
||||
Vector bi;
|
||||
try {
|
||||
bi =
|
||||
-(cameras[i].project(point, Fi, Ei, Hcali) - this->measured_.at(i)).vector();
|
||||
} catch (CheiralityException& e) {
|
||||
std::cout << "Cheirality exception " << std::endl;
|
||||
exit (EXIT_FAILURE);
|
||||
}
|
||||
this->noise_.at(i)->WhitenSystem(Fi, Ei, Hcali, bi);
|
||||
|
||||
f += bi.squaredNorm();
|
||||
if (D == 6) { // optimize only camera pose
|
||||
Fblocks.push_back(KeyMatrix2D(this->keys_[i], Fi));
|
||||
} else {
|
||||
Hcam.block<2, 6>(0, 0) = Fi; // 2 x 6 block for the cameras
|
||||
Hcam.block<2, D - 6>(0, 6) = Hcali; // 2 x nrCal block for the cameras
|
||||
Fblocks.push_back(KeyMatrix2D(this->keys_[i], Hcam));
|
||||
}
|
||||
E.block<2, 3>(2 * i, 0) = Ei;
|
||||
subInsert(b, bi, 2 * i);
|
||||
}
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// Version that computes PointCov, with optional lambda parameter
|
||||
double computeJacobians(std::vector<KeyMatrix2D>& Fblocks, Matrix& E,
|
||||
Matrix3& PointCov, Vector& b, const Cameras& cameras, const Point3& point,
|
||||
double lambda = 0.0, bool diagonalDamping = false) const {
|
||||
|
||||
double f = computeJacobians(Fblocks, E, b, cameras, point);
|
||||
|
||||
// Point covariance inv(E'*E)
|
||||
Matrix3 EtE = E.transpose() * E;
|
||||
Matrix3 DMatrix = eye(E.cols()); // damping matrix
|
||||
|
||||
if (diagonalDamping) { // diagonal of the hessian
|
||||
DMatrix(0, 0) = EtE(0, 0);
|
||||
DMatrix(1, 1) = EtE(1, 1);
|
||||
DMatrix(2, 2) = EtE(2, 2);
|
||||
}
|
||||
|
||||
PointCov.noalias() = (EtE + lambda * DMatrix).inverse();
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
// TODO, there should not be a Matrix version, really
|
||||
double computeJacobians(Matrix& F, Matrix& E, Matrix3& PointCov, Vector& b,
|
||||
const Cameras& cameras, const Point3& point,
|
||||
const double lambda = 0.0) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
std::vector < KeyMatrix2D > Fblocks;
|
||||
double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point,
|
||||
lambda);
|
||||
F = zeros(2 * numKeys, D * numKeys);
|
||||
|
||||
for (size_t i = 0; i < this->keys_.size(); ++i) {
|
||||
F.block<2, D>(2 * i, D * i) = Fblocks.at(i).second; // 2 x 6 block for the cameras
|
||||
}
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// SVD version
|
||||
double computeJacobiansSVD(std::vector<KeyMatrix2D>& Fblocks, Matrix& Enull,
|
||||
Vector& b, const Cameras& cameras, const Point3& point, double lambda =
|
||||
0.0, bool diagonalDamping = false) const {
|
||||
|
||||
Matrix E;
|
||||
Matrix3 PointCov; // useless
|
||||
double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda,
|
||||
diagonalDamping); // diagonalDamping should have no effect (only on PointCov)
|
||||
|
||||
// Do SVD on A
|
||||
Eigen::JacobiSVD < Matrix > svd(E, Eigen::ComputeFullU);
|
||||
Vector s = svd.singularValues();
|
||||
// Enull = zeros(2 * numKeys, 2 * numKeys - 3);
|
||||
int numKeys = this->keys_.size();
|
||||
Enull = svd.matrixU().block(0, 3, 2 * numKeys, 2 * numKeys - 3); // last 2m-3 columns
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// Matrix version of SVD
|
||||
// TODO, there should not be a Matrix version, really
|
||||
double computeJacobiansSVD(Matrix& F, Matrix& Enull, Vector& b,
|
||||
const Cameras& cameras, const Point3& point) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
std::vector < KeyMatrix2D > Fblocks;
|
||||
double f = computeJacobiansSVD(Fblocks, Enull, b, cameras, point);
|
||||
F.resize(2 * numKeys, D * numKeys);
|
||||
F.setZero();
|
||||
|
||||
for (size_t i = 0; i < this->keys_.size(); ++i)
|
||||
F.block<2, D>(2 * i, D * i) = Fblocks.at(i).second; // 2 x 6 block for the cameras
|
||||
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
/// linearize returns a HessianFactor that approximates error(p)
|
||||
boost::shared_ptr<RegularHessianFactor<D> > createHessianFactor(
|
||||
const Cameras& cameras, const Point3& point, const double lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
|
||||
std::vector < KeyMatrix2D > Fblocks;
|
||||
Matrix E;
|
||||
Matrix3 PointCov;
|
||||
Vector b;
|
||||
double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda,
|
||||
diagonalDamping);
|
||||
|
||||
//#define HESSIAN_BLOCKS
|
||||
#ifdef HESSIAN_BLOCKS
|
||||
// Create structures for Hessian Factors
|
||||
std::vector < Matrix > Gs(numKeys * (numKeys + 1) / 2);
|
||||
std::vector < Vector > gs(numKeys);
|
||||
|
||||
sparseSchurComplement(Fblocks, E, PointCov, b, Gs, gs);
|
||||
// schurComplement(Fblocks, E, PointCov, b, Gs, gs);
|
||||
|
||||
//std::vector < Matrix > Gs2(Gs.begin(), Gs.end());
|
||||
//std::vector < Vector > gs2(gs.begin(), gs.end());
|
||||
|
||||
return boost::make_shared < RegularHessianFactor<D>
|
||||
> (this->keys_, Gs, gs, f);
|
||||
#else
|
||||
size_t n1 = D * numKeys + 1;
|
||||
std::vector<DenseIndex> dims(numKeys + 1); // this also includes the b term
|
||||
std::fill(dims.begin(), dims.end() - 1, D);
|
||||
dims.back() = 1;
|
||||
|
||||
SymmetricBlockMatrix augmentedHessian(dims, Matrix::Zero(n1, n1)); // for 10 cameras, size should be (10*D+1 x 10*D+1)
|
||||
sparseSchurComplement(Fblocks, E, PointCov, b, augmentedHessian); // augmentedHessian.matrix().block<D,D> (i1,i2) = ...
|
||||
augmentedHessian(numKeys,numKeys)(0,0) = f;
|
||||
return boost::make_shared<RegularHessianFactor<D> >(
|
||||
this->keys_, augmentedHessian);
|
||||
|
||||
#endif
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
void updateAugmentedHessian(
|
||||
const Cameras& cameras, const Point3& point, const double lambda,
|
||||
bool diagonalDamping, SymmetricBlockMatrix& augmentedHessian) const {
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
|
||||
std::vector < KeyMatrix2D > Fblocks;
|
||||
Matrix E;
|
||||
Matrix3 PointCov;
|
||||
Vector b;
|
||||
double f = computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda,
|
||||
diagonalDamping);
|
||||
|
||||
std::vector<DenseIndex> dims(numKeys + 1); // this also includes the b term
|
||||
std::fill(dims.begin(), dims.end() - 1, D);
|
||||
dims.back() = 1;
|
||||
|
||||
updateSparseSchurComplement(Fblocks, E, PointCov, b, augmentedHessian); // augmentedHessian.matrix().block<D,D> (i1,i2) = ...
|
||||
std::cout << "f "<< f <<std::endl;
|
||||
augmentedHessian(numKeys,numKeys)(0,0) += f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
void schurComplement(const std::vector<KeyMatrix2D>& Fblocks, const Matrix& E,
|
||||
const Matrix& PointCov, const Vector& b,
|
||||
/*output ->*/std::vector<Matrix>& Gs, std::vector<Vector>& gs) const {
|
||||
// Schur complement trick
|
||||
// Gs = F' * F - F' * E * inv(E'*E) * E' * F
|
||||
// gs = F' * (b - E * inv(E'*E) * E' * b)
|
||||
// This version uses full matrices
|
||||
|
||||
int numKeys = this->keys_.size();
|
||||
|
||||
/// Compute Full F ????
|
||||
Matrix F = zeros(2 * numKeys, D * numKeys);
|
||||
for (size_t i = 0; i < this->keys_.size(); ++i)
|
||||
F.block<2, D>(2 * i, D * i) = Fblocks.at(i).second; // 2 x 6 block for the cameras
|
||||
|
||||
Matrix H(D * numKeys, D * numKeys);
|
||||
Vector gs_vector;
|
||||
|
||||
H.noalias() = F.transpose() * (F - (E * (PointCov * (E.transpose() * F))));
|
||||
gs_vector.noalias() = F.transpose()
|
||||
* (b - (E * (PointCov * (E.transpose() * b))));
|
||||
|
||||
// Populate Gs and gs
|
||||
int GsCount2 = 0;
|
||||
for (DenseIndex i1 = 0; i1 < numKeys; i1++) { // for each camera
|
||||
DenseIndex i1D = i1 * D;
|
||||
gs.at(i1) = gs_vector.segment < D > (i1D);
|
||||
for (DenseIndex i2 = 0; i2 < numKeys; i2++) {
|
||||
if (i2 >= i1) {
|
||||
Gs.at(GsCount2) = H.block<D, D>(i1D, i2 * D);
|
||||
GsCount2++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
void updateSparseSchurComplement(const std::vector<KeyMatrix2D>& Fblocks,
|
||||
const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b,
|
||||
/*output ->*/SymmetricBlockMatrix& augmentedHessian) const {
|
||||
// Schur complement trick
|
||||
// Gs = F' * F - F' * E * P * E' * F
|
||||
// gs = F' * (b - E * P * E' * b)
|
||||
|
||||
// a single point is observed in numKeys cameras
|
||||
size_t numKeys = this->keys_.size(); // cameras observing current point
|
||||
size_t aug_numKeys = augmentedHessian.rows() - 1; // all cameras in the group
|
||||
|
||||
// Blockwise Schur complement
|
||||
for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera
|
||||
|
||||
const Matrix2D& Fi1 = Fblocks.at(i1).second;
|
||||
const Matrix23 Ei1_P = E.block<2, 3>(2 * i1, 0) * P;
|
||||
|
||||
// D = (Dx2) * (2)
|
||||
// (augmentedHessian.matrix()).block<D,1> (i1,numKeys+1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b
|
||||
size_t aug_i1 = this->keys_[i1];
|
||||
std::cout << "i1 "<< i1 <<std::endl;
|
||||
std::cout << "aug_i1 "<< aug_i1 <<std::endl;
|
||||
std::cout << "aug_numKeys "<< aug_numKeys <<std::endl;
|
||||
augmentedHessian(aug_i1,aug_numKeys) = //augmentedHessian(aug_i1,aug_numKeys) +
|
||||
Fi1.transpose() * b.segment < 2 > (2 * i1) // F' * b
|
||||
- Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1)
|
||||
|
||||
// (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) )
|
||||
std::cout << "filled 1 " <<std::endl;
|
||||
augmentedHessian(aug_i1,aug_i1) = //augmentedHessian(aug_i1,aug_i1) +
|
||||
Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1);
|
||||
|
||||
// upper triangular part of the hessian
|
||||
for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera
|
||||
const Matrix2D& Fi2 = Fblocks.at(i2).second;
|
||||
size_t aug_i2 = this->keys_[i2];
|
||||
std::cout << "i2 "<< i2 <<std::endl;
|
||||
std::cout << "aug_i2 "<< aug_i2 <<std::endl;
|
||||
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
augmentedHessian(aug_i1, aug_i2) = //augmentedHessian(aug_i1, aug_i2)
|
||||
- Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2);
|
||||
}
|
||||
} // end of for over cameras
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
void sparseSchurComplement(const std::vector<KeyMatrix2D>& Fblocks,
|
||||
const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b,
|
||||
/*output ->*/SymmetricBlockMatrix& augmentedHessian) const {
|
||||
// Schur complement trick
|
||||
// Gs = F' * F - F' * E * P * E' * F
|
||||
// gs = F' * (b - E * P * E' * b)
|
||||
|
||||
// a single point is observed in numKeys cameras
|
||||
size_t numKeys = this->keys_.size();
|
||||
|
||||
// Blockwise Schur complement
|
||||
for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera
|
||||
|
||||
const Matrix2D& Fi1 = Fblocks.at(i1).second;
|
||||
const Matrix23 Ei1_P = E.block<2, 3>(2 * i1, 0) * P;
|
||||
|
||||
// D = (Dx2) * (2)
|
||||
// (augmentedHessian.matrix()).block<D,1> (i1,numKeys+1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b
|
||||
augmentedHessian(i1,numKeys) = Fi1.transpose() * b.segment < 2 > (2 * i1) // F' * b
|
||||
- Fi1.transpose() * (Ei1_P * (E.transpose() * b)); // D = (Dx2) * (2x3) * (3*2m) * (2m x 1)
|
||||
|
||||
// (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) )
|
||||
augmentedHessian(i1,i1) =
|
||||
Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1);
|
||||
|
||||
// upper triangular part of the hessian
|
||||
for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera
|
||||
const Matrix2D& Fi2 = Fblocks.at(i2).second;
|
||||
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
augmentedHessian(i1,i2) =
|
||||
-Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2);
|
||||
}
|
||||
} // end of for over cameras
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
void sparseSchurComplement(const std::vector<KeyMatrix2D>& Fblocks,
|
||||
const Matrix& E, const Matrix& P /*Point Covariance*/, const Vector& b,
|
||||
/*output ->*/std::vector<Matrix>& Gs, std::vector<Vector>& gs) const {
|
||||
// Schur complement trick
|
||||
// Gs = F' * F - F' * E * P * E' * F
|
||||
// gs = F' * (b - E * P * E' * b)
|
||||
|
||||
// a single point is observed in numKeys cameras
|
||||
size_t numKeys = this->keys_.size();
|
||||
|
||||
int GsIndex = 0;
|
||||
// Blockwise Schur complement
|
||||
for (size_t i1 = 0; i1 < numKeys; i1++) { // for each camera
|
||||
// GsIndex points to the upper triangular blocks
|
||||
// 0 1 2 3 4
|
||||
// X 5 6 7 8
|
||||
// X X 9 10 11
|
||||
// X X X 12 13
|
||||
// X X X X 14
|
||||
const Matrix2D& Fi1 = Fblocks.at(i1).second;
|
||||
|
||||
const Matrix23 Ei1_P = E.block<2, 3>(2 * i1, 0) * P;
|
||||
|
||||
{ // for i1 = i2
|
||||
// D = (Dx2) * (2)
|
||||
gs.at(i1) = Fi1.transpose() * b.segment < 2 > (2 * i1); // F' * b
|
||||
// D = (Dx2) * (2x3) * (3*2m) * (2m x 1)
|
||||
gs.at(i1) -= Fi1.transpose() * (Ei1_P * (E.transpose() * b));
|
||||
|
||||
// (DxD) = (Dx2) * ( (2xD) - (2x3) * (3x2) * (2xD) )
|
||||
Gs.at(GsIndex) = Fi1.transpose() * (Fi1 - Ei1_P * E.block<2, 3>(2 * i1, 0).transpose() * Fi1);
|
||||
GsIndex++;
|
||||
}
|
||||
// upper triangular part of the hessian
|
||||
for (size_t i2 = i1+1; i2 < numKeys; i2++) { // for each camera
|
||||
const Matrix2D& Fi2 = Fblocks.at(i2).second;
|
||||
|
||||
// (DxD) = (Dx2) * ( (2x2) * (2xD) )
|
||||
Gs.at(GsIndex) = -Fi1.transpose() * (Ei1_P * E.block<2, 3>(2 * i2, 0).transpose() * Fi2);
|
||||
GsIndex++;
|
||||
}
|
||||
} // end of for over cameras
|
||||
}
|
||||
// ****************************************************************************************************
|
||||
boost::shared_ptr<ImplicitSchurFactor<D> > createImplicitSchurFactor(
|
||||
const Cameras& cameras, const Point3& point, double lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
typename boost::shared_ptr<ImplicitSchurFactor<D> > f(
|
||||
new ImplicitSchurFactor<D>());
|
||||
computeJacobians(f->Fblocks(), f->E(), f->PointCovariance(), f->b(),
|
||||
cameras, point, lambda, diagonalDamping);
|
||||
f->initKeys();
|
||||
return f;
|
||||
}
|
||||
|
||||
// ****************************************************************************************************
|
||||
boost::shared_ptr<JacobianFactorQ<D> > createJacobianQFactor(
|
||||
const Cameras& cameras, const Point3& point, double lambda = 0.0,
|
||||
bool diagonalDamping = false) const {
|
||||
std::vector < KeyMatrix2D > Fblocks;
|
||||
Matrix E;
|
||||
Matrix3 PointCov;
|
||||
Vector b;
|
||||
computeJacobians(Fblocks, E, PointCov, b, cameras, point, lambda,
|
||||
diagonalDamping);
|
||||
return boost::make_shared < JacobianFactorQ<D> > (Fblocks, E, PointCov, b);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
/// Serialization function
|
||||
friend class boost::serialization::access;
|
||||
template<class ARCHIVE>
|
||||
void serialize(ARCHIVE & ar, const unsigned int version) {
|
||||
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
|
||||
ar & BOOST_SERIALIZATION_NVP(measured_);
|
||||
ar & BOOST_SERIALIZATION_NVP(body_P_sensor_);
|
||||
}
|
||||
};
|
||||
|
||||
} // \ namespace gtsam
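// ---------------------------------------------------------------------------
// Illustrative sketch (editor's addition): the dense form of the Schur
// complement that schurComplement / sparseSchurComplement above assemble,
//   H = F' * (F - E * P * E' * F),   g = F' * (b - E * P * E' * b),
// with P = inv(E'E) the point covariance.  Eigen-only (assumes <Eigen/Dense>),
// m = 2 cameras and D = 6:
namespace example {
inline void denseSchurComplementSketch() {
  const int m = 2, D = 6;
  Eigen::MatrixXd F = Eigen::MatrixXd::Random(2 * m, D * m);
  Eigen::MatrixXd E = Eigen::MatrixXd::Random(2 * m, 3);
  Eigen::VectorXd b = Eigen::VectorXd::Random(2 * m);
  Eigen::Matrix3d P = (E.transpose() * E).inverse();
  Eigen::MatrixXd H = F.transpose() * (F - E * (P * (E.transpose() * F)));
  Eigen::VectorXd g = F.transpose() * (b - E * (P * (E.transpose() * b)));
  (void)H; (void)g;
}
} // namespace example
// ---------------------------------------------------------------------------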
|
||||
|
|
@ -0,0 +1,663 @@
|
|||
/* ----------------------------------------------------------------------------
|
||||
|
||||
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
|
||||
* Atlanta, Georgia 30332-0415
|
||||
* All Rights Reserved
|
||||
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
|
||||
|
||||
* See LICENSE for the license information
|
||||
|
||||
* -------------------------------------------------------------------------- */
|
||||
|
||||
/**
|
||||
* @file SmartProjectionFactor.h
|
||||
* @brief Base class to create smart factors on poses or cameras
|
||||
* @author Luca Carlone
|
||||
* @author Zsolt Kira
|
||||
* @author Frank Dellaert
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "SmartFactorBase.h"
|
||||
|
||||
#include <gtsam_unstable/geometry/triangulation.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/inference/Symbol.h>
|
||||
#include <gtsam/slam/dataset.h>
|
||||
#include <gtsam_unstable/geometry/triangulation.h>
|
||||
|
||||
#include <boost/optional.hpp>
|
||||
#include <boost/make_shared.hpp>
|
||||
#include <vector>
|
||||
|
||||
namespace gtsam {
|
||||
|
||||
/**
|
||||
* Structure for storing some state memory, used to speed up optimization
|
||||
* @addtogroup SLAM
|
||||
*/
|
||||
class SmartProjectionFactorState {
|
||||
|
||||
protected:
|
||||
|
||||
public:
|
||||
|
||||
SmartProjectionFactorState() {
|
||||
}
|
||||
// Hessian representation (after Schur complement)
|
||||
bool calculatedHessian;
|
||||
Matrix H;
|
||||
Vector gs_vector;
|
||||
std::vector<Matrix> Gs;
|
||||
std::vector<Vector> gs;
|
||||
double f;
|
||||
};
|
||||
|
||||
/**
|
||||
* SmartProjectionFactor: triangulates point
|
||||
* TODO: why LANDMARK parameter?
|
||||
*/
|
||||
template<class POSE, class LANDMARK, class CALIBRATION, size_t D>
|
||||
class SmartProjectionFactor: public SmartFactorBase<POSE, CALIBRATION, D> {
|
||||
protected:
|
||||
|
||||
// Some triangulation parameters
|
||||
const double rankTolerance_; ///< threshold to decide whether triangulation is degenerate_
|
||||
const double retriangulationThreshold_; ///< threshold to decide whether to re-triangulate
|
||||
mutable std::vector<Pose3> cameraPosesTriangulation_; ///< current triangulation poses
|
||||
|
||||
const bool manageDegeneracy_; ///< if set to true will use the rotation-only version for degenerate cases
|
||||
|
||||
const bool enableEPI_; ///< if set to true, will refine triangulation using LM
|
||||
|
||||
const double linearizationThreshold_; ///< threshold to decide whether to re-linearize
|
||||
mutable std::vector<Pose3> cameraPosesLinearization_; ///< current linearization poses
|
||||
|
||||
mutable Point3 point_; ///< Current estimate of the 3D point
|
||||
|
||||
mutable bool degenerate_;
|
||||
mutable bool cheiralityException_;
|
||||
|
||||
// verbosity handling for Cheirality Exceptions
|
||||
const bool throwCheirality_; ///< If true, rethrows Cheirality exceptions (default: false)
|
||||
const bool verboseCheirality_; ///< If true, prints text for Cheirality exceptions (default: false)
|
||||
|
||||
boost::shared_ptr<SmartProjectionFactorState> state_;
|
||||
|
||||
/// shorthand for smart projection factor state variable
|
||||
typedef boost::shared_ptr<SmartProjectionFactorState> SmartFactorStatePtr;
|
||||
|
||||
/// shorthand for base class type
|
||||
typedef SmartFactorBase<POSE, CALIBRATION, D> Base;
|
||||
|
||||
/// shorthand for this class
|
||||
typedef SmartProjectionFactor<POSE, LANDMARK, CALIBRATION, D> This;
|
||||
|
||||
public:
|
||||
|
||||
/// shorthand for a smart pointer to a factor
|
||||
typedef boost::shared_ptr<This> shared_ptr;
|
||||
|
||||
/// shorthand for a pinhole camera
|
||||
typedef PinholeCamera<CALIBRATION> Camera;
|
||||
typedef std::vector<Camera> Cameras;
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
* @param rankTol tolerance used to check if point triangulation is degenerate
|
||||
* @param linThreshold threshold on relative pose changes used to decide whether to relinearize (selective relinearization)
|
||||
* @param manageDegeneracy is true, in presence of degenerate triangulation, the factor is converted to a rotation-only constraint,
|
||||
* otherwise the factor is simply neglected
|
||||
* @param enableEPI if set to true linear triangulation is refined with embedded LM iterations
|
||||
* @param body_P_sensor is the transform from body to sensor frame (default identity)
|
||||
*/
|
||||
SmartProjectionFactor(const double rankTol, const double linThreshold,
|
||||
const bool manageDegeneracy, const bool enableEPI,
|
||||
boost::optional<POSE> body_P_sensor = boost::none,
|
||||
SmartFactorStatePtr state = SmartFactorStatePtr(
|
||||
new SmartProjectionFactorState())) :
|
||||
Base(body_P_sensor), rankTolerance_(rankTol), retriangulationThreshold_(
|
||||
1e-5), manageDegeneracy_(manageDegeneracy), enableEPI_(enableEPI), linearizationThreshold_(
|
||||
linThreshold), degenerate_(false), cheiralityException_(false), throwCheirality_(
|
||||
false), verboseCheirality_(false), state_(state) {
|
||||
}
|
||||
|
||||
/** Virtual destructor */
|
||||
virtual ~SmartProjectionFactor() {
|
||||
}
|
||||
|
||||
/**
|
||||
* print
|
||||
* @param s optional string naming the factor
|
||||
* @param keyFormatter optional formatter useful for printing Symbols
|
||||
*/
|
||||
void print(const std::string& s = "", const KeyFormatter& keyFormatter =
|
||||
DefaultKeyFormatter) const {
|
||||
std::cout << s << "SmartProjectionFactor, z = \n";
|
||||
std::cout << "rankTolerance_ = " << rankTolerance_ << std::endl;
|
||||
std::cout << "degenerate_ = " << degenerate_ << std::endl;
|
||||
std::cout << "cheiralityException_ = " << cheiralityException_ << std::endl;
|
||||
Base::print("", keyFormatter);
|
||||
}
|
||||
|
||||
/// Check if the new linearization point_ is the same as the one used for previous triangulation
|
||||
bool decideIfTriangulate(const Cameras& cameras) const {
|
||||
// several calls to linearize will be done from the same linearization point_, hence it is not needed to re-triangulate
|
||||
// Note that this is not yet "selecting linearization", that will come later, and we only check if the
|
||||
// current linearization is the "same" (up to tolerance) w.r.t. the last time we triangulated the point_
|
||||
|
||||
size_t m = cameras.size();
|
||||
|
||||
bool retriangulate = false;
|
||||
|
||||
// if we do not have a previous linearization point_ or the new linearization point_ includes more poses
|
||||
if (cameraPosesTriangulation_.empty()
|
||||
|| cameras.size() != cameraPosesTriangulation_.size())
|
||||
retriangulate = true;
|
||||
|
||||
if (!retriangulate) {
|
||||
for (size_t i = 0; i < cameras.size(); i++) {
|
||||
if (!cameras[i].pose().equals(cameraPosesTriangulation_[i],
|
||||
retriangulationThreshold_)) {
|
||||
retriangulate = true; // at least two poses are different, hence we retriangulate
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (retriangulate) { // we store the current poses used for triangulation
|
||||
cameraPosesTriangulation_.clear();
|
||||
cameraPosesTriangulation_.reserve(m);
|
||||
for (size_t i = 0; i < m; i++)
|
||||
// cameraPosesTriangulation_[i] = cameras[i].pose();
|
||||
cameraPosesTriangulation_.push_back(cameras[i].pose());
|
||||
}
|
||||
|
||||
return retriangulate; // true if the point needs to be re-triangulated
|
||||
}
|
||||
|
||||
/// This function checks if the new linearization point_ is 'close' to the previous one used for linearization
|
||||
bool decideIfLinearize(const Cameras& cameras) const {
|
||||
// "selective linearization"
|
||||
// The function evaluates how close are the old and the new poses, transformed in the ref frame of the first pose
|
||||
// (we only care about the "rigidity" of the poses, not about their absolute pose)
|
||||
|
||||
if (this->linearizationThreshold_ < 0) //by convention if linearizationThreshold is negative we always relinearize
|
||||
return true;
|
||||
|
||||
// if we do not have a previous linearization point_ or the new linearization point_ includes more poses
|
||||
if (cameraPosesLinearization_.empty()
|
||||
|| (cameras.size() != cameraPosesLinearization_.size()))
|
||||
return true;
|
||||
|
||||
Pose3 firstCameraPose, firstCameraPoseOld;
|
||||
for (size_t i = 0; i < cameras.size(); i++) {
|
||||
|
||||
if (i == 0) { // we store the initial pose, this is useful for selective re-linearization
|
||||
firstCameraPose = cameras[i].pose();
|
||||
firstCameraPoseOld = cameraPosesLinearization_[i];
|
||||
continue;
|
||||
}
|
||||
|
||||
// we compare the poses in the frame of the first pose
|
||||
Pose3 localCameraPose = firstCameraPose.between(cameras[i].pose());
|
||||
Pose3 localCameraPoseOld = firstCameraPoseOld.between(
|
||||
cameraPosesLinearization_[i]);
|
||||
if (!localCameraPose.equals(localCameraPoseOld,
|
||||
this->linearizationThreshold_))
|
||||
return true; // at least two "relative" poses are different, hence we re-linearize
|
||||
}
|
||||
return false; // if we get here all relative poses are unchanged and we do not need to re-linearize
|
||||
}
|
||||
|
||||
/// triangulateSafe
|
||||
size_t triangulateSafe(const Values& values) const {
|
||||
return triangulateSafe(this->cameras(values));
|
||||
}
|
||||
|
||||
/// triangulateSafe
|
||||
size_t triangulateSafe(const Cameras& cameras) const {
|
||||
|
||||
size_t m = cameras.size();
|
||||
if (m < 2) { // if we have a single pose the corresponding factor is uninformative
|
||||
degenerate_ = true;
|
||||
return m;
|
||||
}
|
||||
bool retriangulate = decideIfTriangulate(cameras);
|
||||
|
||||
if (retriangulate) {
|
||||
// We triangulate the 3D position of the landmark
|
||||
try {
|
||||
// std::cout << "triangulatePoint3 i \n" << rankTolerance << std::endl;
|
||||
point_ = triangulatePoint3<CALIBRATION>(cameras, this->measured_,
|
||||
rankTolerance_, enableEPI_);
|
||||
degenerate_ = false;
|
||||
cheiralityException_ = false;
|
||||
} catch (TriangulationUnderconstrainedException& e) {
|
||||
// a TriangulationUnderconstrainedException can be caused by:
|
||||
// 1) There is a single pose for triangulation - this should not happen because we checked the number of poses before
|
||||
// 2) The rank of the matrix used for triangulation is < 3: rotation-only, parallel cameras (or motion towards the landmark)
|
||||
// in the second case we want to use a rotation-only smart factor
|
||||
degenerate_ = true;
|
||||
cheiralityException_ = false;
|
||||
} catch (TriangulationCheiralityException& e) {
|
||||
// point is behind one of the cameras: can be the case of close-to-parallel cameras or may depend on outliers
|
||||
// we manage this case by either discarding the smart factor, or imposing a rotation-only constraint
|
||||
cheiralityException_ = true;
|
||||
}
|
||||
}
|
||||
return m;
|
||||
}
|
||||
|
||||
/// triangulate
|
||||
bool triangulateForLinearize(const Cameras& cameras) const {
|
||||
|
||||
bool isDebug = false;
|
||||
size_t nrCameras = this->triangulateSafe(cameras);
|
||||
|
||||
if (nrCameras < 2
|
||||
|| (!this->manageDegeneracy_
|
||||
&& (this->cheiralityException_ || this->degenerate_))) {
|
||||
if (isDebug) {
|
||||
std::cout << "createImplicitSchurFactor: degenerate configuration"
|
||||
<< std::endl;
|
||||
}
|
||||
return false;
|
||||
} else {
|
||||
|
||||
// instead, if we want to manage the exception..
|
||||
if (this->cheiralityException_ || this->degenerate_) { // if we want to manage the exceptions with rotation-only factors
|
||||
this->degenerate_ = true;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/// linearize returns a HessianFactor that approximates error(p)
|
||||
boost::shared_ptr<RegularHessianFactor<D> > createHessianFactor(
|
||||
const Cameras& cameras, const double lambda = 0.0) const {
|
||||
|
||||
bool isDebug = false;
|
||||
int numKeys = this->keys_.size();
|
||||
// Create structures for Hessian Factors
|
||||
std::vector < Key > js;
|
||||
std::vector < Matrix > Gs(numKeys * (numKeys + 1) / 2);
|
||||
std::vector < Vector > gs(numKeys);
|
||||
|
||||
if (this->measured_.size() != cameras.size()) {
|
||||
std::cout
|
||||
<< "SmartProjectionHessianFactor: this->measured_.size() inconsistent with input"
|
||||
<< std::endl;
|
||||
exit(1);
|
||||
}
|
||||
|
||||
this->triangulateSafe(cameras);
|
||||
|
||||
if (numKeys < 2
|
||||
|| (!this->manageDegeneracy_
|
||||
&& (this->cheiralityException_ || this->degenerate_))) {
|
||||
// std::cout << "In linearize: exception" << std::endl;
|
||||
BOOST_FOREACH(gtsam::Matrix& m, Gs)
|
||||
m = zeros(D, D);
|
||||
BOOST_FOREACH(Vector& v, gs)
|
||||
v = zero(D);
|
||||
return boost::make_shared<RegularHessianFactor<D> >(this->keys_, Gs, gs,
|
||||
0.0);
|
||||
}
|
||||
|
||||
// instead, if we want to manage the exception..
|
||||
if (this->cheiralityException_ || this->degenerate_) { // if we want to manage the exceptions with rotation-only factors
|
||||
this->degenerate_ = true;
|
||||
}
|
||||
|
||||
bool doLinearize = this->decideIfLinearize(cameras);
|
||||
|
||||
if (this->linearizationThreshold_ >= 0 && doLinearize) { // if we apply selective relinearization and we need to relinearize
  this->cameraPosesLinearization_.resize(cameras.size());
  for (size_t i = 0; i < cameras.size(); i++)
    this->cameraPosesLinearization_[i] = cameras[i].pose();
}
|
||||
|
||||
if (!doLinearize) { // return the previous Hessian factor
|
||||
std::cout << "=============================" << std::endl;
|
||||
std::cout << "doLinearize " << doLinearize << std::endl;
|
||||
std::cout << "this->linearizationThreshold_ "
|
||||
<< this->linearizationThreshold_ << std::endl;
|
||||
std::cout << "this->degenerate_ " << this->degenerate_ << std::endl;
|
||||
std::cout
|
||||
<< "something wrong in SmartProjectionHessianFactor: selective relinearization should be disabled"
|
||||
<< std::endl;
|
||||
exit(1);
|
||||
return boost::make_shared<RegularHessianFactor<D> >(this->keys_,
|
||||
this->state_->Gs, this->state_->gs, this->state_->f);
|
||||
}
|
||||
|
||||
// ==================================================================
|
||||
Matrix F, E;
|
||||
Matrix3 PointCov;
|
||||
Vector b;
|
||||
double f = computeJacobians(F, E, PointCov, b, cameras, lambda);
|
||||
|
||||
// Schur complement trick
|
||||
// Frank says: should be possible to do this more efficiently?
|
||||
// And we care, as in grouped factors this is called repeatedly
|
||||
Matrix H(D * numKeys, D * numKeys);
|
||||
Vector gs_vector;
|
||||
|
||||
H.noalias() = F.transpose() * (F - (E * (PointCov * (E.transpose() * F))));
|
||||
gs_vector.noalias() = F.transpose()
|
||||
* (b - (E * (PointCov * (E.transpose() * b))));
|
||||
if (isDebug)
|
||||
std::cout << "gs_vector size " << gs_vector.size() << std::endl;
|
||||
|
||||
// Populate Gs and gs
|
||||
int GsCount2 = 0;
|
||||
for (DenseIndex i1 = 0; i1 < numKeys; i1++) { // for each camera
|
||||
DenseIndex i1D = i1 * D;
|
||||
gs.at(i1) = gs_vector.segment < D > (i1D);
|
||||
for (DenseIndex i2 = 0; i2 < numKeys; i2++) {
|
||||
if (i2 >= i1) {
|
||||
Gs.at(GsCount2) = H.block < D, D > (i1D, i2 * D);
|
||||
GsCount2++;
|
||||
}
|
||||
}
|
||||
}
|
||||
// ==================================================================
|
||||
if (this->linearizationThreshold_ >= 0) { // if we do not use selective relinearization we don't need to store these variables
|
||||
this->state_->Gs = Gs;
|
||||
this->state_->gs = gs;
|
||||
this->state_->f = f;
|
||||
}
|
||||
return boost::make_shared<RegularHessianFactor<D> >(this->keys_, Gs, gs, f);
|
||||
}
|
||||
|
||||
// create factor
|
||||
  boost::shared_ptr<ImplicitSchurFactor<D> > createImplicitSchurFactor(
      const Cameras& cameras, double lambda) const {
    if (triangulateForLinearize(cameras))
      return Base::createImplicitSchurFactor(cameras, point_, lambda);
    else
      return boost::shared_ptr<ImplicitSchurFactor<D> >();
  }

  /// create factor
  boost::shared_ptr<JacobianFactorQ<D> > createJacobianQFactor(
      const Cameras& cameras, double lambda) const {
    if (triangulateForLinearize(cameras))
      return Base::createJacobianQFactor(cameras, point_, lambda);
    else
      return boost::shared_ptr<JacobianFactorQ<D> >();
  }

  /// Create a factor, takes values
  boost::shared_ptr<JacobianFactorQ<D> > createJacobianQFactor(
      const Values& values, double lambda) const {
    Cameras myCameras;
    // TODO triangulate twice ??
    bool nonDegenerate = computeCamerasAndTriangulate(values, myCameras);
    if (nonDegenerate)
      return createJacobianQFactor(myCameras, lambda);
    else
      return boost::shared_ptr<JacobianFactorQ<D> >();
  }

  /// Returns true if nonDegenerate
  bool computeCamerasAndTriangulate(const Values& values,
      Cameras& myCameras) const {
    Values valuesFactor;

    // Select only the cameras
    BOOST_FOREACH(const Key key, this->keys_)
      valuesFactor.insert(key, values.at(key));

    myCameras = this->cameras(valuesFactor);
    size_t nrCameras = this->triangulateSafe(myCameras);

    if (nrCameras < 2
        || (!this->manageDegeneracy_
            && (this->cheiralityException_ || this->degenerate_)))
      return false;

    // instead, if we want to manage the exception..
    if (this->cheiralityException_ || this->degenerate_) // if we want to manage the exceptions with rotation-only factors
      this->degenerate_ = true;

    if (this->degenerate_) {
      std::cout << "SmartProjectionFactor: this is not ready" << std::endl;
      std::cout << "this->cheiralityException_ " << this->cheiralityException_
          << std::endl;
      std::cout << "this->degenerate_ " << this->degenerate_ << std::endl;
    }
    return true;
  }

  /// Takes values
  bool computeEP(Matrix& E, Matrix& PointCov, const Values& values) const {
    Cameras myCameras;
    bool nonDegenerate = computeCamerasAndTriangulate(values, myCameras);
    if (nonDegenerate)
      computeEP(E, PointCov, myCameras);
    return nonDegenerate;
  }

  /// Assumes non-degenerate !
  void computeEP(Matrix& E, Matrix& PointCov, const Cameras& cameras) const {
    return Base::computeEP(E, PointCov, cameras, point_);
  }

  /// Version that takes values, and creates the point
  bool computeJacobians(std::vector<typename Base::KeyMatrix2D>& Fblocks,
      Matrix& E, Matrix& PointCov, Vector& b, const Values& values) const {
    Cameras myCameras;
    bool nonDegenerate = computeCamerasAndTriangulate(values, myCameras);
    if (nonDegenerate)
      computeJacobians(Fblocks, E, PointCov, b, myCameras, 0.0);
    return nonDegenerate;
  }

  /// Compute F, E only (called below in both vanilla and SVD versions)
  /// Assumes the point has been computed
  /// Note E can be 2m*3 or 2m*2, in case point is degenerate
  double computeJacobians(std::vector<typename Base::KeyMatrix2D>& Fblocks,
      Matrix& E, Vector& b, const Cameras& cameras) const {

    if (this->degenerate_) {
      std::cout << "manage degeneracy " << manageDegeneracy_ << std::endl;
      std::cout << "point " << point_ << std::endl;
      std::cout
          << "SmartProjectionFactor: Management of degeneracy is disabled - not ready to be used"
          << std::endl;
      if (D > 6) {
        std::cout
            << "Management of degeneracy is not yet ready when one also optimizes for the calibration"
            << std::endl;
      }

      int numKeys = this->keys_.size();
      E = zeros(2 * numKeys, 2);
      b = zero(2 * numKeys);
      double f = 0;
      for (size_t i = 0; i < this->measured_.size(); i++) {
        if (i == 0) { // first pose
          this->point_ = cameras[i].backprojectPointAtInfinity(
              this->measured_.at(i));
          // 3D parametrization of point at infinity: [px py 1]
        }
        Matrix Fi, Ei;
        Vector bi = -(cameras[i].projectPointAtInfinity(this->point_, Fi, Ei)
            - this->measured_.at(i)).vector();

        this->noise_.at(i)->WhitenSystem(Fi, Ei, bi);
        f += bi.squaredNorm();
        Fblocks.push_back(typename Base::KeyMatrix2D(this->keys_[i], Fi));
        E.block<2, 2>(2 * i, 0) = Ei;
        subInsert(b, bi, 2 * i);
      }
      return f;
    } else {
      // nondegenerate: just return Base version
      return Base::computeJacobians(Fblocks, E, b, cameras, point_);
    } // end else
  }

  /// Version that computes PointCov, with optional lambda parameter
  double computeJacobians(std::vector<typename Base::KeyMatrix2D>& Fblocks,
      Matrix& E, Matrix& PointCov, Vector& b, const Cameras& cameras,
      const double lambda = 0.0) const {

    double f = computeJacobians(Fblocks, E, b, cameras);

    // Point covariance inv(E'*E)
    PointCov.noalias() = (E.transpose() * E + lambda * eye(E.cols())).inverse();

    return f;
  }

  /// takes values
  bool computeJacobiansSVD(std::vector<typename Base::KeyMatrix2D>& Fblocks,
      Matrix& Enull, Vector& b, const Values& values) const {
    typename Base::Cameras myCameras;
    bool nonDegenerate = computeCamerasAndTriangulate(values, myCameras);
    if (nonDegenerate)
      computeJacobiansSVD(Fblocks, Enull, b, myCameras);
    return nonDegenerate;
  }

  /// SVD version
  double computeJacobiansSVD(std::vector<typename Base::KeyMatrix2D>& Fblocks,
      Matrix& Enull, Vector& b, const Cameras& cameras) const {
    return Base::computeJacobiansSVD(Fblocks, Enull, b, cameras, point_);
  }

  /// Returns Matrix, TODO: maybe should not exist -> not sparse !
  // TODO should there be a lambda?
  double computeJacobiansSVD(Matrix& F, Matrix& Enull, Vector& b,
      const Cameras& cameras) const {
    return Base::computeJacobiansSVD(F, Enull, b, cameras, point_);
  }

  /// Returns Matrix, TODO: maybe should not exist -> not sparse !
  double computeJacobians(Matrix& F, Matrix& E, Matrix3& PointCov, Vector& b,
      const Cameras& cameras, const double lambda) const {
    return Base::computeJacobians(F, E, PointCov, b, cameras, point_, lambda);
  }

  /// Calculate vector of re-projection errors, before applying noise model
  /// Assumes triangulation was done and degeneracy handled
  Vector reprojectionError(const Cameras& cameras) const {
    return Base::reprojectionError(cameras, point_);
  }

  /// Calculate vector of re-projection errors, before applying noise model
  Vector reprojectionError(const Values& values) const {
    Cameras myCameras;
    bool nonDegenerate = computeCamerasAndTriangulate(values, myCameras);
    if (nonDegenerate)
      return reprojectionError(myCameras);
    else
      return zero(myCameras.size() * 2);
  }

  /**
   * Calculate the error of the factor.
   * This is the log-likelihood, e.g. \f$ 0.5(h(x)-z)^2/\sigma^2 \f$ in case of Gaussian.
   * In this class, we take the raw prediction error \f$ h(x)-z \f$, ask the noise model
   * to transform it to \f$ (h(x)-z)^2/\sigma^2 \f$, and then multiply by 0.5.
   */
  double totalReprojectionError(const Cameras& cameras,
      boost::optional<Point3> externalPoint = boost::none) const {

    size_t nrCameras;
    if (externalPoint) {
      nrCameras = this->keys_.size();
      point_ = *externalPoint;
      degenerate_ = false;
      cheiralityException_ = false;
    } else {
      nrCameras = this->triangulateSafe(cameras);
    }

    if (nrCameras < 2
        || (!this->manageDegeneracy_
            && (this->cheiralityException_ || this->degenerate_))) {
      // if we don't want to manage the exceptions we discard the factor
      // std::cout << "In error evaluation: exception" << std::endl;
      return 0.0;
    }

    if (this->cheiralityException_) { // if we want to manage the exceptions with rotation-only factors
      std::cout
          << "SmartProjectionHessianFactor: cheirality exception (this should not happen if CheiralityException is disabled)!"
          << std::endl;
      this->degenerate_ = true;
    }

    if (this->degenerate_) {
      // return 0.0; // TODO: this maybe should be zero?
      std::cout
          << "SmartProjectionHessianFactor: trying to manage degeneracy (this should not happen if manageDegeneracy is disabled)!"
          << std::endl;
      size_t i = 0;
      double overallError = 0;
      BOOST_FOREACH(const Camera& camera, cameras) {
        const Point2& zi = this->measured_.at(i);
        if (i == 0) // first pose
          this->point_ = camera.backprojectPointAtInfinity(zi); // 3D parametrization of point at infinity
        Point2 reprojectionError(
            camera.projectPointAtInfinity(this->point_) - zi);
        overallError += 0.5
            * this->noise_.at(i)->distance(reprojectionError.vector());
        i += 1;
      }
      return overallError;
    } else {
      // Just use version in base class
      return Base::totalReprojectionError(cameras, point_);
    }
  }
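
  // Worked example (illustrative note, not part of the original implementation):
  // with an isotropic pixel noise of sigma = 1 and a single measurement whose
  // reprojection residual is (2, 2) pixels, the whitened squared norm is
  // 2^2 + 2^2 = 8, so totalReprojectionError returns 0.5 * 8 = 4.0.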

  /// Cameras are computed in derived class
  virtual Cameras cameras(const Values& values) const = 0;

  /** return the landmark */
  boost::optional<Point3> point() const {
    return point_;
  }

  /** COMPUTE the landmark */
  boost::optional<Point3> point(const Values& values) const {
    triangulateSafe(values);
    return point_;
  }

  /** return the degenerate state */
  inline bool isDegenerate() const {
    return (cheiralityException_ || degenerate_);
  }

  /** return the cheirality status flag */
  inline bool isPointBehindCamera() const {
    return cheiralityException_;
  }
  /** return cheirality verbosity */
  inline bool verboseCheirality() const {
    return verboseCheirality_;
  }

  /** return flag for throwing cheirality exceptions */
  inline bool throwCheirality() const {
    return throwCheirality_;
  }

private:

  /// Serialization function
  friend class boost::serialization::access;
  template<class ARCHIVE>
  void serialize(ARCHIVE & ar, const unsigned int version) {
    ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
    ar & BOOST_SERIALIZATION_NVP(throwCheirality_);
    ar & BOOST_SERIALIZATION_NVP(verboseCheirality_);
  }
};

} // \ namespace gtsam

@ -0,0 +1,177 @@
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

/**
 * @file   SmartProjectionPoseFactor.h
 * @brief  Produces Hessian factors on poses from monocular measurements of a single landmark
 * @author Luca Carlone
 * @author Chris Beall
 * @author Zsolt Kira
 */

#pragma once

#include "SmartProjectionFactor.h"

namespace gtsam {

/**
 * The calibration is known here. The factor only constrains poses (variable dimension is 6).
 * @addtogroup SLAM
 */
template<class POSE, class LANDMARK, class CALIBRATION>
class SmartProjectionPoseFactor: public SmartProjectionFactor<POSE, LANDMARK, CALIBRATION, 6> {
protected:

  // Known calibration
  std::vector<boost::shared_ptr<CALIBRATION> > K_all_; ///< shared pointer to calibration object (one for each camera)

public:

  /// shorthand for base class type
  typedef SmartProjectionFactor<POSE, LANDMARK, CALIBRATION, 6> Base;

  /// shorthand for this class
  typedef SmartProjectionPoseFactor<POSE, LANDMARK, CALIBRATION> This;

  /// shorthand for a smart pointer to a factor
  typedef boost::shared_ptr<This> shared_ptr;

  /**
   * Constructor
   * @param rankTol tolerance used to check if point triangulation is degenerate
   * @param linThreshold threshold on relative pose changes used to decide whether to relinearize (selective relinearization)
   * @param manageDegeneracy if true, in the presence of degenerate triangulation the factor is converted to a rotation-only constraint;
   * otherwise the factor is simply neglected
   * @param enableEPI if set to true linear triangulation is refined with embedded LM iterations
   * @param body_P_sensor is the transform from body to sensor frame (default identity)
   */
  SmartProjectionPoseFactor(const double rankTol = 1,
      const double linThreshold = -1, const bool manageDegeneracy = false,
      const bool enableEPI = false, boost::optional<POSE> body_P_sensor = boost::none) :
        Base(rankTol, linThreshold, manageDegeneracy, enableEPI, body_P_sensor) {}

  /** Virtual destructor */
  virtual ~SmartProjectionPoseFactor() {}

  /**
   * add a new measurement and pose key
   * @param measured_i is the 2-dimensional projection of the landmark in this view (the measurement)
   * @param poseKey_i is the key of the pose of the camera observing the landmark
   * @param noise_i is the measurement noise
   * @param K_i is the (known) camera calibration
   */
  void add(const Point2 measured_i, const Key poseKey_i,
      const SharedNoiseModel noise_i,
      const boost::shared_ptr<CALIBRATION> K_i) {
    Base::add(measured_i, poseKey_i, noise_i);
    K_all_.push_back(K_i);
  }

  /**
   * add new measurements and pose keys
   * Variant of the previous one in which we include a set of measurements
   */
  void add(std::vector<Point2> measurements, std::vector<Key> poseKeys,
      std::vector<SharedNoiseModel> noises,
      std::vector<boost::shared_ptr<CALIBRATION> > Ks) {
    Base::add(measurements, poseKeys, noises);
    for (size_t i = 0; i < measurements.size(); i++) {
      K_all_.push_back(Ks.at(i));
    }
  }

  /**
   * add new measurements and pose keys
   * Variant of the previous one in which we include a set of measurements with the same noise and calibration
   */
  void add(std::vector<Point2> measurements, std::vector<Key> poseKeys,
      const SharedNoiseModel noise, const boost::shared_ptr<CALIBRATION> K) {
    for (size_t i = 0; i < measurements.size(); i++) {
      Base::add(measurements.at(i), poseKeys.at(i), noise);
      K_all_.push_back(K);
    }
  }
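
  // Illustrative usage sketch (not part of the original header). It assumes a
  // Cal3_S2 calibration and an isotropic pixel noise model, both hypothetical
  // choices made only for this example:
  //
  //   SmartProjectionPoseFactor<Pose3, Point3, Cal3_S2> factor;
  //   boost::shared_ptr<Cal3_S2> K(new Cal3_S2(500, 500, 0, 320, 240));
  //   SharedNoiseModel pixelNoise = noiseModel::Isotropic::Sigma(2, 1.0);
  //   factor.add(Point2(170, 123), Symbol('x', 1), pixelNoise, K); // view 1
  //   factor.add(Point2(320, 240), Symbol('x', 2), pixelNoise, K); // view 2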

  /**
   * print
   * @param s optional string naming the factor
   * @param keyFormatter optional formatter useful for printing Symbols
   */
  void print(const std::string& s = "", const KeyFormatter& keyFormatter =
      DefaultKeyFormatter) const {
    std::cout << s << "SmartProjectionPoseFactor, z = \n ";
    BOOST_FOREACH(const boost::shared_ptr<CALIBRATION>& K, K_all_)
      K->print("calibration = ");
    Base::print("", keyFormatter);
  }

  /// equals
  virtual bool equals(const NonlinearFactor& p, double tol = 1e-9) const {
    const This *e = dynamic_cast<const This*>(&p);

    return e && Base::equals(p, tol);
  }

  /// get the dimension of the factor
  virtual size_t dim() const {
    return 6 * this->keys_.size();
  }

  // Collect all cameras
  typename Base::Cameras cameras(const Values& values) const {
    typename Base::Cameras cameras;
    size_t i = 0;
    BOOST_FOREACH(const Key& k, this->keys_) {
      Pose3 pose = values.at<Pose3>(k);
      typename Base::Camera camera(pose, *K_all_[i++]);
      cameras.push_back(camera);
    }
    return cameras;
  }

  /**
   * linearize returns a Hessian factor constraining the poses
   */
  virtual boost::shared_ptr<GaussianFactor> linearize(
      const Values& values) const {
    return this->createHessianFactor(cameras(values));
  }

  /**
   * error calculates the error of the factor.
   */
  virtual double error(const Values& values) const {
    if (this->active(values)) {
      return this->totalReprojectionError(cameras(values));
    } else { // else of active flag
      return 0.0;
    }
  }

  /** return the calibration objects */
  inline const std::vector<boost::shared_ptr<CALIBRATION> > calibration() const {
    return K_all_;
  }

private:

  /// Serialization function
  friend class boost::serialization::access;
  template<class ARCHIVE>
  void serialize(ARCHIVE & ar, const unsigned int version) {
    ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(Base);
    ar & BOOST_SERIALIZATION_NVP(K_all_);
  }

}; // end of class declaration

} // \ namespace gtsam
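
// ---------------------------------------------------------------------------
// Illustrative end-to-end usage sketch (not part of this commit). It assumes
// the header above is on the include path, a Cal3_S2 calibration, and made-up
// poses and pixel measurements; it only exercises the API declared in this
// file (add, error, linearize).
// ---------------------------------------------------------------------------
//
// #include <gtsam/geometry/Pose3.h>
// #include <gtsam/geometry/Cal3_S2.h>
// #include <gtsam/inference/Symbol.h>
// #include <gtsam/nonlinear/NonlinearFactorGraph.h>
// #include <gtsam/nonlinear/Values.h>
// #include "SmartProjectionPoseFactor.h"
//
// using namespace gtsam;
//
// void smartFactorExample() {
//   typedef SmartProjectionPoseFactor<Pose3, Point3, Cal3_S2> SmartFactor;
//
//   // One shared calibration and an isotropic 1-pixel measurement noise
//   boost::shared_ptr<Cal3_S2> K(new Cal3_S2(500.0, 500.0, 0.0, 320.0, 240.0));
//   SharedNoiseModel pixelNoise = noiseModel::Isotropic::Sigma(2, 1.0);
//
//   // Two observations of the same landmark from two poses
//   SmartFactor::shared_ptr factor(new SmartFactor());
//   factor->add(Point2(310.0, 245.0), Symbol('x', 1), pixelNoise, K);
//   factor->add(Point2(330.0, 235.0), Symbol('x', 2), pixelNoise, K);
//
//   // The factor triangulates the landmark internally; only poses are variables
//   NonlinearFactorGraph graph;
//   graph.push_back(factor);
//
//   Values values;
//   values.insert(Symbol('x', 1), Pose3());
//   values.insert(Symbol('x', 2), Pose3(Rot3(), Point3(1.0, 0.0, 0.0)));
//
//   double err = factor->error(values);                             // total reprojection error
//   GaussianFactor::shared_ptr hessian = factor->linearize(values); // Hessian factor on the two poses
// }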