Commented out Hessian, cholesky, and gradient code so that library compiles

release/4.3a0
Richard Roberts 2013-07-09 17:50:32 +00:00
parent 8c2b53ad3b
commit b0836ac57c
15 changed files with 497 additions and 1399 deletions

View File

@ -23,35 +23,44 @@ namespace gtsam {
/* ************************************************************************* */
template<class DERIVED, class FACTORGRAPH, class BAYESNET>
std::vector<Key>
BayesTreeCliqueBaseUnordered<DERIVED, FACTORGRAPH, BAYESNET>::separator_setminus_B(derived_ptr B) const
bool BayesTreeCliqueBaseUnordered<DERIVED, FACTORGRAPH, BAYESNET>::equals(
const DERIVED& other, double tol = 1e-9) const
{
FastSet<Key> p_F_S_parents(this->conditional()->beginParents(), this->conditional()->endParents());
FastSet<Key> indicesB(B->conditional()->begin(), B->conditional()->end());
std::vector<Key> S_setminus_B;
std::set_difference(p_F_S_parents.begin(), p_F_S_parents.end(),
indicesB.begin(), indicesB.end(), back_inserter(S_setminus_B));
return S_setminus_B;
return (!conditional_ && !other.conditional())
|| conditional_->equals(*other.conditional(), tol);
}
/* ************************************************************************* */
template<class DERIVED, class FACTORGRAPH, class BAYESNET>
std::vector<Key> BayesTreeCliqueBaseUnordered<DERIVED, FACTORGRAPH, BAYESNET>::shortcut_indices(
derived_ptr B, const FactorGraphType& p_Cp_B) const
{
gttic(shortcut_indices);
FastSet<Key> allKeys = p_Cp_B.keys();
FastSet<Key> indicesB(B->conditional()->begin(), B->conditional()->end());
std::vector<Key> S_setminus_B = separator_setminus_B(B);
std::vector<Key> keep;
// keep = S\B intersect allKeys (S_setminus_B is already sorted)
std::set_intersection(S_setminus_B.begin(), S_setminus_B.end(), //
allKeys.begin(), allKeys.end(), back_inserter(keep));
// keep += B intersect allKeys
std::set_intersection(indicesB.begin(), indicesB.end(), //
allKeys.begin(), allKeys.end(), back_inserter(keep));
return keep;
}
///* ************************************************************************* */
//template<class DERIVED, class FACTORGRAPH, class BAYESNET>
//std::vector<Key>
// BayesTreeCliqueBaseUnordered<DERIVED, FACTORGRAPH, BAYESNET>::separator_setminus_B(derived_ptr B) const
//{
// FastSet<Key> p_F_S_parents(this->conditional()->beginParents(), this->conditional()->endParents());
// FastSet<Key> indicesB(B->conditional()->begin(), B->conditional()->end());
// std::vector<Key> S_setminus_B;
// std::set_difference(p_F_S_parents.begin(), p_F_S_parents.end(),
// indicesB.begin(), indicesB.end(), back_inserter(S_setminus_B));
// return S_setminus_B;
//}
///* ************************************************************************* */
//template<class DERIVED, class FACTORGRAPH, class BAYESNET>
//std::vector<Key> BayesTreeCliqueBaseUnordered<DERIVED, FACTORGRAPH, BAYESNET>::shortcut_indices(
// derived_ptr B, const FactorGraphType& p_Cp_B) const
//{
// gttic(shortcut_indices);
// FastSet<Key> allKeys = p_Cp_B.keys();
// FastSet<Key> indicesB(B->conditional()->begin(), B->conditional()->end());
// std::vector<Key> S_setminus_B = separator_setminus_B(B);
// std::vector<Key> keep;
// // keep = S\B intersect allKeys (S_setminus_B is already sorted)
// std::set_intersection(S_setminus_B.begin(), S_setminus_B.end(), //
// allKeys.begin(), allKeys.end(), back_inserter(keep));
// // keep += B intersect allKeys
// std::set_intersection(indicesB.begin(), indicesB.end(), //
// allKeys.begin(), allKeys.end(), back_inserter(keep));
// return keep;
//}
/* ************************************************************************* */
template<class DERIVED, class FACTORGRAPH, class BAYESNET>

View File

@ -137,18 +137,18 @@ namespace gtsam {
protected:
/// Calculate set \f$ S \setminus B \f$ for shortcut calculations
std::vector<Key> separator_setminus_B(derived_ptr B) const;
///// Calculate set \f$ S \setminus B \f$ for shortcut calculations
//std::vector<Key> separator_setminus_B(derived_ptr B) const;
/// Calculate set \f$ S_p \cap B \f$ for shortcut calculations
std::vector<Key> parent_separator_intersection_B(derived_ptr B) const;
///// Calculate set \f$ S_p \cap B \f$ for shortcut calculations
//std::vector<Key> parent_separator_intersection_B(derived_ptr B) const;
/**
* Determine variable indices to keep in recursive separator shortcut calculation
* The factor graph p_Cp_B has keys from the parent clique Cp and from B.
* But we only keep the variables not in S union B.
*/
std::vector<Key> shortcut_indices(derived_ptr B, const FactorGraphType& p_Cp_B) const;
///**
// * Determine variable indices to keep in recursive separator shortcut calculation
// * The factor graph p_Cp_B has keys from the parent clique Cp and from B.
// * But we only keep the variables not in S union B.
// */
//std::vector<Key> shortcut_indices(derived_ptr B, const FactorGraphType& p_Cp_B) const;
/** Non-recursive delete cached shortcuts and marginals - internal only. */
void deleteCachedShortcutsNonRecursive() { cachedSeparatorMarginal_ = boost::none; }

View File

@ -364,201 +364,201 @@ namespace gtsam {
fillNodesIndex(subtree); // Populate nodes index
}
/* ************************************************************************* */
// First finds clique marginal then marginalizes that
/* ************************************************************************* */
template<class CLIQUE>
typename BayesTreeUnordered<CLIQUE>::sharedFactor BayesTreeUnordered<CLIQUE>::marginalFactor(
Key j, Eliminate function) const
{
return boost::make_shared<FactorType>();
// gttic(BayesTree_marginalFactor);
// /* ************************************************************************* */
// // First finds clique marginal then marginalizes that
// /* ************************************************************************* */
// template<class CLIQUE>
// typename BayesTreeUnordered<CLIQUE>::sharedFactor BayesTreeUnordered<CLIQUE>::marginalFactor(
// Key j, Eliminate function) const
// {
// return boost::make_shared<FactorType>();
//// gttic(BayesTree_marginalFactor);
////
//// // get clique containing Index j
//// sharedClique clique = this->clique(j);
////
//// // calculate or retrieve its marginal P(C) = P(F,S)
////#ifdef OLD_SHORTCUT_MARGINALS
//// FactorGraph<FactorType> cliqueMarginal = clique->marginal(root_,function);
////#else
//// FactorGraph<FactorType> cliqueMarginal = clique->marginal2(root_,function);
////#endif
////
//// // Reduce the variable indices to start at zero
//// gttic(Reduce);
//// const Permutation reduction = internal::createReducingPermutation(cliqueMarginal.keys());
//// internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
//// BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, cliqueMarginal) {
//// if(factor) factor->reduceWithInverse(inverseReduction); }
//// gttoc(Reduce);
////
//// // now, marginalize out everything that is not variable j
//// GenericSequentialSolver<FactorType> solver(cliqueMarginal);
//// boost::shared_ptr<FactorType> result = solver.marginalFactor(inverseReduction[j], function);
////
//// // Undo the reduction
//// gttic(Undo_Reduce);
//// result->permuteWithInverse(reduction);
//// BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, cliqueMarginal) {
//// if(factor) factor->permuteWithInverse(reduction); }
//// gttoc(Undo_Reduce);
//// return result;
// }
//
// // get clique containing Index j
// sharedClique clique = this->clique(j);
// /* ************************************************************************* */
// template<class CLIQUE>
// typename BayesTreeUnordered<CLIQUE>::sharedBayesNet BayesTreeUnordered<CLIQUE>::marginalBayesNet(
// Key j, Eliminate function) const
// {
// return boost::make_shared<BayesNetType>();
// //gttic(BayesTree_marginalBayesNet);
//
// // calculate or retrieve its marginal P(C) = P(F,S)
//#ifdef OLD_SHORTCUT_MARGINALS
// FactorGraph<FactorType> cliqueMarginal = clique->marginal(root_,function);
//#else
// FactorGraph<FactorType> cliqueMarginal = clique->marginal2(root_,function);
//#endif
// //// calculate marginal as a factor graph
// //FactorGraph<FactorType> fg;
// //fg.push_back(this->marginalFactor(j,function));
//
// // Reduce the variable indices to start at zero
// gttic(Reduce);
// const Permutation reduction = internal::createReducingPermutation(cliqueMarginal.keys());
// internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
// BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, cliqueMarginal) {
// if(factor) factor->reduceWithInverse(inverseReduction); }
// gttoc(Reduce);
// //// Reduce the variable indices to start at zero
// //gttic(Reduce);
// //const Permutation reduction = internal::createReducingPermutation(fg.keys());
// //internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
// //BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, fg) {
// // if(factor) factor->reduceWithInverse(inverseReduction); }
// //gttoc(Reduce);
//
// // now, marginalize out everything that is not variable j
// GenericSequentialSolver<FactorType> solver(cliqueMarginal);
// boost::shared_ptr<FactorType> result = solver.marginalFactor(inverseReduction[j], function);
// //// eliminate factor graph marginal to a Bayes net
// //boost::shared_ptr<BayesNet<CONDITIONAL> > bn = GenericSequentialSolver<FactorType>(fg).eliminate(function);
//
// // Undo the reduction
// gttic(Undo_Reduce);
// result->permuteWithInverse(reduction);
// BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, cliqueMarginal) {
// if(factor) factor->permuteWithInverse(reduction); }
// gttoc(Undo_Reduce);
// return result;
}
/* ************************************************************************* */
template<class CLIQUE>
typename BayesTreeUnordered<CLIQUE>::sharedBayesNet BayesTreeUnordered<CLIQUE>::marginalBayesNet(
Key j, Eliminate function) const
{
return boost::make_shared<BayesNetType>();
//gttic(BayesTree_marginalBayesNet);
//// calculate marginal as a factor graph
//FactorGraph<FactorType> fg;
//fg.push_back(this->marginalFactor(j,function));
//// Reduce the variable indices to start at zero
//gttic(Reduce);
//const Permutation reduction = internal::createReducingPermutation(fg.keys());
//internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
//BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, fg) {
// if(factor) factor->reduceWithInverse(inverseReduction); }
//gttoc(Reduce);
//// eliminate factor graph marginal to a Bayes net
//boost::shared_ptr<BayesNet<CONDITIONAL> > bn = GenericSequentialSolver<FactorType>(fg).eliminate(function);
//// Undo the reduction
//gttic(Undo_Reduce);
//bn->permuteWithInverse(reduction);
//BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, fg) {
// if(factor) factor->permuteWithInverse(reduction); }
//gttoc(Undo_Reduce);
//return bn;
}
/* ************************************************************************* */
// Find two cliques, their joint, then marginalizes
/* ************************************************************************* */
template<class CLIQUE>
typename BayesTreeUnordered<CLIQUE>::sharedFactorGraph
BayesTreeUnordered<CLIQUE>::joint(Key j1, Key j2, Eliminate function) const {
return boost::make_shared<FactorGraphType>();
//gttic(BayesTree_joint);
//// get clique C1 and C2
//sharedClique C1 = (*this)[j1], C2 = (*this)[j2];
//gttic(Lowest_common_ancestor);
//// Find lowest common ancestor clique
//sharedClique B; {
// // Build two paths to the root
// FastList<sharedClique> path1, path2; {
// sharedClique p = C1;
// while(p) {
// path1.push_front(p);
// p = p->parent();
// }
// } {
// sharedClique p = C2;
// while(p) {
// path2.push_front(p);
// p = p->parent();
// }
// }
// // Find the path intersection
// B = this->root();
// typename FastList<sharedClique>::const_iterator p1 = path1.begin(), p2 = path2.begin();
// while(p1 != path1.end() && p2 != path2.end() && *p1 == *p2) {
// B = *p1;
// ++p1;
// ++p2;
// }
//}
//gttoc(Lowest_common_ancestor);
//// Compute marginal on lowest common ancestor clique
//gttic(LCA_marginal);
//FactorGraph<FactorType> p_B = B->marginal2(this->root(), function);
//gttoc(LCA_marginal);
//// Compute shortcuts of the requested cliques given the lowest common ancestor
//gttic(Clique_shortcuts);
//BayesNet<CONDITIONAL> p_C1_Bred = C1->shortcut(B, function);
//BayesNet<CONDITIONAL> p_C2_Bred = C2->shortcut(B, function);
//gttoc(Clique_shortcuts);
//// Factor the shortcuts to be conditioned on the full root
//// Get the set of variables to eliminate, which is C1\B.
//gttic(Full_root_factoring);
//sharedConditional p_C1_B; {
// std::vector<Index> C1_minus_B; {
// FastSet<Index> C1_minus_B_set(C1->conditional()->beginParents(), C1->conditional()->endParents());
// BOOST_FOREACH(const Index j, *B->conditional()) {
// C1_minus_B_set.erase(j); }
// C1_minus_B.assign(C1_minus_B_set.begin(), C1_minus_B_set.end());
// }
// // Factor into C1\B | B.
// FactorGraph<FactorType> temp_remaining;
// boost::tie(p_C1_B, temp_remaining) = FactorGraph<FactorType>(p_C1_Bred).eliminate(C1_minus_B, function);
//}
//sharedConditional p_C2_B; {
// std::vector<Index> C2_minus_B; {
// FastSet<Index> C2_minus_B_set(C2->conditional()->beginParents(), C2->conditional()->endParents());
// BOOST_FOREACH(const Index j, *B->conditional()) {
// C2_minus_B_set.erase(j); }
// C2_minus_B.assign(C2_minus_B_set.begin(), C2_minus_B_set.end());
// }
// // Factor into C2\B | B.
// FactorGraph<FactorType> temp_remaining;
// boost::tie(p_C2_B, temp_remaining) = FactorGraph<FactorType>(p_C2_Bred).eliminate(C2_minus_B, function);
//}
//gttoc(Full_root_factoring);
//gttic(Variable_joint);
//// Build joint on all involved variables
//FactorGraph<FactorType> p_BC1C2;
//p_BC1C2.push_back(p_B);
//p_BC1C2.push_back(p_C1_B->toFactor());
//p_BC1C2.push_back(p_C2_B->toFactor());
//if(C1 != B)
// p_BC1C2.push_back(C1->conditional()->toFactor());
//if(C2 != B)
// p_BC1C2.push_back(C2->conditional()->toFactor());
//// Reduce the variable indices to start at zero
//gttic(Reduce);
//const Permutation reduction = internal::createReducingPermutation(p_BC1C2.keys());
//internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
//BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, p_BC1C2) {
// if(factor) factor->reduceWithInverse(inverseReduction); }
//std::vector<Index> js; js.push_back(inverseReduction[j1]); js.push_back(inverseReduction[j2]);
//gttoc(Reduce);
//// now, marginalize out everything that is not variable j
//GenericSequentialSolver<FactorType> solver(p_BC1C2);
//boost::shared_ptr<FactorGraph<FactorType> > result = solver.jointFactorGraph(js, function);
//// Undo the reduction
//gttic(Undo_Reduce);
//BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, *result) {
// if(factor) factor->permuteWithInverse(reduction); }
//BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, p_BC1C2) {
// if(factor) factor->permuteWithInverse(reduction); }
//gttoc(Undo_Reduce);
//return result;
}
/* ************************************************************************* */
template<class CLIQUE>
typename BayesTreeUnordered<CLIQUE>::sharedBayesNet BayesTreeUnordered<CLIQUE>::jointBayesNet(
Key j1, Key j2, Eliminate function) const
{
return boost::make_shared<BayesNetType>();
//// eliminate factor graph marginal to a Bayes net
//return GenericSequentialSolver<FactorType> (
// *this->joint(j1, j2, function)).eliminate(function);
}
// //// Undo the reduction
// //gttic(Undo_Reduce);
// //bn->permuteWithInverse(reduction);
// //BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, fg) {
// // if(factor) factor->permuteWithInverse(reduction); }
// //gttoc(Undo_Reduce);
// //return bn;
// }
//
// /* ************************************************************************* */
// // Find two cliques, their joint, then marginalizes
// /* ************************************************************************* */
// template<class CLIQUE>
// typename BayesTreeUnordered<CLIQUE>::sharedFactorGraph
// BayesTreeUnordered<CLIQUE>::joint(Key j1, Key j2, Eliminate function) const {
// return boost::make_shared<FactorGraphType>();
// //gttic(BayesTree_joint);
//
// //// get clique C1 and C2
// //sharedClique C1 = (*this)[j1], C2 = (*this)[j2];
//
// //gttic(Lowest_common_ancestor);
// //// Find lowest common ancestor clique
// //sharedClique B; {
// // // Build two paths to the root
// // FastList<sharedClique> path1, path2; {
// // sharedClique p = C1;
// // while(p) {
// // path1.push_front(p);
// // p = p->parent();
// // }
// // } {
// // sharedClique p = C2;
// // while(p) {
// // path2.push_front(p);
// // p = p->parent();
// // }
// // }
// // // Find the path intersection
// // B = this->root();
// // typename FastList<sharedClique>::const_iterator p1 = path1.begin(), p2 = path2.begin();
// // while(p1 != path1.end() && p2 != path2.end() && *p1 == *p2) {
// // B = *p1;
// // ++p1;
// // ++p2;
// // }
// //}
// //gttoc(Lowest_common_ancestor);
//
// //// Compute marginal on lowest common ancestor clique
// //gttic(LCA_marginal);
// //FactorGraph<FactorType> p_B = B->marginal2(this->root(), function);
// //gttoc(LCA_marginal);
//
// //// Compute shortcuts of the requested cliques given the lowest common ancestor
// //gttic(Clique_shortcuts);
// //BayesNet<CONDITIONAL> p_C1_Bred = C1->shortcut(B, function);
// //BayesNet<CONDITIONAL> p_C2_Bred = C2->shortcut(B, function);
// //gttoc(Clique_shortcuts);
//
// //// Factor the shortcuts to be conditioned on the full root
// //// Get the set of variables to eliminate, which is C1\B.
// //gttic(Full_root_factoring);
// //sharedConditional p_C1_B; {
// // std::vector<Index> C1_minus_B; {
// // FastSet<Index> C1_minus_B_set(C1->conditional()->beginParents(), C1->conditional()->endParents());
// // BOOST_FOREACH(const Index j, *B->conditional()) {
// // C1_minus_B_set.erase(j); }
// // C1_minus_B.assign(C1_minus_B_set.begin(), C1_minus_B_set.end());
// // }
// // // Factor into C1\B | B.
// // FactorGraph<FactorType> temp_remaining;
// // boost::tie(p_C1_B, temp_remaining) = FactorGraph<FactorType>(p_C1_Bred).eliminate(C1_minus_B, function);
// //}
// //sharedConditional p_C2_B; {
// // std::vector<Index> C2_minus_B; {
// // FastSet<Index> C2_minus_B_set(C2->conditional()->beginParents(), C2->conditional()->endParents());
// // BOOST_FOREACH(const Index j, *B->conditional()) {
// // C2_minus_B_set.erase(j); }
// // C2_minus_B.assign(C2_minus_B_set.begin(), C2_minus_B_set.end());
// // }
// // // Factor into C2\B | B.
// // FactorGraph<FactorType> temp_remaining;
// // boost::tie(p_C2_B, temp_remaining) = FactorGraph<FactorType>(p_C2_Bred).eliminate(C2_minus_B, function);
// //}
// //gttoc(Full_root_factoring);
//
// //gttic(Variable_joint);
// //// Build joint on all involved variables
// //FactorGraph<FactorType> p_BC1C2;
// //p_BC1C2.push_back(p_B);
// //p_BC1C2.push_back(p_C1_B->toFactor());
// //p_BC1C2.push_back(p_C2_B->toFactor());
// //if(C1 != B)
// // p_BC1C2.push_back(C1->conditional()->toFactor());
// //if(C2 != B)
// // p_BC1C2.push_back(C2->conditional()->toFactor());
//
// //// Reduce the variable indices to start at zero
// //gttic(Reduce);
// //const Permutation reduction = internal::createReducingPermutation(p_BC1C2.keys());
// //internal::Reduction inverseReduction = internal::Reduction::CreateAsInverse(reduction);
// //BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, p_BC1C2) {
// // if(factor) factor->reduceWithInverse(inverseReduction); }
// //std::vector<Index> js; js.push_back(inverseReduction[j1]); js.push_back(inverseReduction[j2]);
// //gttoc(Reduce);
//
// //// now, marginalize out everything that is not variable j
// //GenericSequentialSolver<FactorType> solver(p_BC1C2);
// //boost::shared_ptr<FactorGraph<FactorType> > result = solver.jointFactorGraph(js, function);
//
// //// Undo the reduction
// //gttic(Undo_Reduce);
// //BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, *result) {
// // if(factor) factor->permuteWithInverse(reduction); }
// //BOOST_FOREACH(const boost::shared_ptr<FactorType>& factor, p_BC1C2) {
// // if(factor) factor->permuteWithInverse(reduction); }
// //gttoc(Undo_Reduce);
// //return result;
// }
//
// /* ************************************************************************* */
// template<class CLIQUE>
// typename BayesTreeUnordered<CLIQUE>::sharedBayesNet BayesTreeUnordered<CLIQUE>::jointBayesNet(
// Key j1, Key j2, Eliminate function) const
// {
// return boost::make_shared<BayesNetType>();
// //// eliminate factor graph marginal to a Bayes net
// //return GenericSequentialSolver<FactorType> (
// // *this->joint(j1, j2, function)).eliminate(function);
// }
/* ************************************************************************* */
template<class CLIQUE>

View File

@ -154,27 +154,27 @@ namespace gtsam {
/** Collect number of cliques with cached separator marginals */
size_t numCachedSeparatorMarginals() const;
/** return marginal on any variable */
sharedFactor marginalFactor(Key j, Eliminate function) const;
///** return marginal on any variable */
//sharedFactor marginalFactor(Key j, Eliminate function) const;
/**
* return marginal on any variable, as a Bayes Net
* NOTE: this function calls marginal, and then eliminates it into a Bayes Net
* This is more expensive than the above function
*/
sharedBayesNet marginalBayesNet(Key j, Eliminate function) const;
///**
// * return marginal on any variable, as a Bayes Net
// * NOTE: this function calls marginal, and then eliminates it into a Bayes Net
// * This is more expensive than the above function
// */
//sharedBayesNet marginalBayesNet(Key j, Eliminate function) const;
/**
* return joint on two variables
* Limitation: can only calculate joint if cliques are disjoint or one of them is root
*/
sharedFactorGraph joint(Index j1, Index j2, Eliminate function) const;
///**
// * return joint on two variables
// * Limitation: can only calculate joint if cliques are disjoint or one of them is root
// */
//sharedFactorGraph joint(Index j1, Index j2, Eliminate function) const;
/**
* return joint on two variables as a BayesNet
* Limitation: can only calculate joint if cliques are disjoint or one of them is root
*/
sharedBayesNet jointBayesNet(Index j1, Index j2, Eliminate function) const;
///**
// * return joint on two variables as a BayesNet
// * Limitation: can only calculate joint if cliques are disjoint or one of them is root
// */
//sharedBayesNet jointBayesNet(Index j1, Index j2, Eliminate function) const;
/**
* Read only with side effects

View File

@ -78,32 +78,32 @@ namespace gtsam {
return gy;
}
/* ************************************************************************* */
VectorValuesUnordered GaussianBayesNetUnordered::optimizeGradientSearch() const
{
gttic(Compute_Gradient);
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
VectorValuesUnordered grad = gradientAtZero();
double gradientSqNorm = grad.dot(grad);
gttoc(Compute_Gradient);
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesNetUnordered::optimizeGradientSearch() const
//{
// gttic(Compute_Gradient);
// // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
// VectorValuesUnordered grad = gradientAtZero();
// double gradientSqNorm = grad.dot(grad);
// gttoc(Compute_Gradient);
gttic(Compute_Rg);
// Compute R * g
Errors Rg = GaussianFactorGraphUnordered(*this) * grad;
gttoc(Compute_Rg);
// gttic(Compute_Rg);
// // Compute R * g
// Errors Rg = GaussianFactorGraphUnordered(*this) * grad;
// gttoc(Compute_Rg);
gttic(Compute_minimizing_step_size);
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
gttoc(Compute_minimizing_step_size);
// gttic(Compute_minimizing_step_size);
// // Compute minimizing step size
// double step = -gradientSqNorm / dot(Rg, Rg);
// gttoc(Compute_minimizing_step_size);
gttic(Compute_point);
// Compute steepest descent point
scal(step, grad);
gttoc(Compute_point);
// gttic(Compute_point);
// // Compute steepest descent point
// scal(step, grad);
// gttoc(Compute_point);
return grad;
}
// return grad;
//}
/* ************************************************************************* */
pair<Matrix,Vector> GaussianBayesNetUnordered::matrix() const
@ -111,16 +111,17 @@ namespace gtsam {
return GaussianFactorGraphUnordered(*this).jacobian();
}
/* ************************************************************************* */
double determinant(const GaussianBayesNet& bayesNet) {
double logDet = 0.0;
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesNetUnordered::gradient(const VectorValuesUnordered& x0) const
//{
// return GaussianFactorGraphUnordered(*this).gradient(x0);
//}
BOOST_FOREACH(boost::shared_ptr<const GaussianConditional> cg, bayesNet){
logDet += cg->get_R().diagonal().unaryExpr(ptr_fun<double,double>(log)).sum();
}
return exp(logDet);
}
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesNetUnordered::gradientAtZero() const
//{
// return GaussianFactorGraphUnordered(*this).gradientAtZero();
//}
/* ************************************************************************* */
VectorValues gradient(const GaussianBayesNet& bayesNet, const VectorValues& x0) {

View File

@ -93,7 +93,7 @@ namespace gtsam {
* @param bn The GaussianBayesNet on which to perform this computation
* @return The resulting \f$ \delta x \f$ as described above
*/
VectorValuesUnordered optimizeGradientSearch() const;
//VectorValuesUnordered optimizeGradientSearch() const;
///@}
@ -114,7 +114,7 @@ namespace gtsam {
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues
*/
VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
//VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
/**
* Compute the gradient of the energy function,
@ -125,7 +125,7 @@ namespace gtsam {
* @param [output] g A VectorValues to store the gradient, which must be preallocated, see allocateVectorValues
* @return The gradient as a VectorValues
*/
VectorValuesUnordered gradientAtZero() const;
//VectorValuesUnordered gradientAtZero() const;
/**
* Computes the determinant of a GassianBayesNet

View File

@ -67,42 +67,42 @@ namespace gtsam {
return visitor.collectedResult;
}
/* ************************************************************************* */
VectorValuesUnordered GaussianBayesTreeUnordered::optimizeGradientSearch() const
{
gttic(Compute_Gradient);
// Compute gradient (call gradientAtZero function, which is defined for various linear systems)
VectorValuesUnordered grad = gradientAtZero();
double gradientSqNorm = grad.dot(grad);
gttoc(Compute_Gradient);
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesTreeUnordered::optimizeGradientSearch() const
//{
// gttic(Compute_Gradient);
// // Compute gradient (call gradientAtZero function, which is defined for various linear systems)
// VectorValuesUnordered grad = gradientAtZero();
// double gradientSqNorm = grad.dot(grad);
// gttoc(Compute_Gradient);
gttic(Compute_Rg);
// Compute R * g
Errors Rg = GaussianFactorGraphUnordered(*this) * grad;
gttoc(Compute_Rg);
// gttic(Compute_Rg);
// // Compute R * g
// Errors Rg = GaussianFactorGraphUnordered(*this) * grad;
// gttoc(Compute_Rg);
gttic(Compute_minimizing_step_size);
// Compute minimizing step size
double step = -gradientSqNorm / dot(Rg, Rg);
gttoc(Compute_minimizing_step_size);
// gttic(Compute_minimizing_step_size);
// // Compute minimizing step size
// double step = -gradientSqNorm / dot(Rg, Rg);
// gttoc(Compute_minimizing_step_size);
gttic(Compute_point);
// Compute steepest descent point
scal(step, grad);
gttoc(Compute_point);
// gttic(Compute_point);
// // Compute steepest descent point
// scal(step, grad);
// gttoc(Compute_point);
return grad;
}
// return grad;
//}
/* ************************************************************************* */
VectorValuesUnordered GaussianBayesTreeUnordered::gradient(const VectorValuesUnordered& x0) const {
return gtsam::gradient(GaussianFactorGraphUnordered(*this), x0);
}
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesTreeUnordered::gradient(const VectorValuesUnordered& x0) const {
// return GaussianFactorGraphUnordered(*this).gradient(x0);
//}
/* ************************************************************************* */
void GaussianBayesTreeUnordered::gradientAtZeroInPlace(VectorValuesUnordered& g) const {
gradientAtZero(GaussianFactorGraphUnordered(*this), g);
}
///* ************************************************************************* */
//VectorValuesUnordered GaussianBayesTreeUnordered::gradientAtZero() const {
// return GaussianFactorGraphUnordered(*this).gradientAtZero();
//}
/* ************************************************************************* */
double GaussianBayesTreeUnordered::logDeterminant() const

View File

@ -91,17 +91,14 @@ namespace gtsam {
* \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f] */
VectorValuesUnordered optimizeGradientSearch() const;
/** In-place version of optimizeGradientSearch requiring pre-allocated VectorValues \c x */
void optimizeGradientSearchInPlace(VectorValuesUnordered& grad) const;
//VectorValuesUnordered optimizeGradientSearch() const;
/** Compute the gradient of the energy function, \f$ \nabla_{x=x_0} \left\Vert \Sigma^{-1} R x -
* d \right\Vert^2 \f$, centered around \f$ x = x_0 \f$. The gradient is \f$ R^T(Rx-d) \f$.
*
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValues */
VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
//VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
/** Compute the gradient of the energy function, \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} R x - d
* \right\Vert^2 \f$, centered around zero. The gradient about zero is \f$ -R^T d \f$. See also
@ -109,7 +106,7 @@ namespace gtsam {
*
* @param [output] g A VectorValues to store the gradient, which must be preallocated, see
* allocateVectorValues */
void gradientAtZeroInPlace(VectorValuesUnordered& g) const;
//VectorValuesUnordered gradientAtZero() const;
/** Computes the determinant of a GassianBayesTree, as if the Bayes tree is reorganized into a
* matrix. A GassianBayesTree is equivalent to an upper triangular matrix, and for an upper

View File

@ -201,6 +201,41 @@ namespace gtsam {
// return make_pair(conditional, combinedFactor);
//}
///* ************************************************************************* */
//VectorValuesUnordered GaussianFactorGraphUnordered::gradient(const VectorValuesUnordered& x0) const
//{
// VectorValuesUnordered g = VectorValuesUnordered::Zero(x0);
// Errors e = gaussianErrors(*this, x0);
// transposeMultiplyAdd(*this, 1.0, e, g);
// return g;
//}
/* ************************************************************************* */
namespace {
JacobianFactorUnordered::shared_ptr convertToJacobianFactorPtr(const GaussianFactorUnordered::shared_ptr &gf) {
JacobianFactorUnordered::shared_ptr result = boost::dynamic_pointer_cast<JacobianFactorUnordered>(gf);
if( !result ) {
result = boost::make_shared<JacobianFactorUnordered>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
}
return result;
}
}
///* ************************************************************************* */
//VectorValuesUnordered GaussianFactorGraphUnordered::gradientAtZero() const
//{
// assert(false);
// // Zero-out the gradient
// VectorValuesUnordered g;
// Errors e;
// BOOST_FOREACH(const sharedFactor& Ai_G, *this) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// e.push_back(-Ai->getb());
// }
// transposeMultiplyAdd(*this, 1.0, e, g);
// return g;
//}
/* ************************************************************************* */
bool hasConstraints(const GaussianFactorGraphUnordered& factors) {
typedef JacobianFactorUnordered J;
@ -235,126 +270,90 @@ namespace gtsam {
/* ************************************************************************* */
namespace {
// Downcast gf to a JacobianFactorUnordered; if it is some other factor type
// (e.g. a Hessian), convert it by constructing a JacobianFactorUnordered from
// it, which factorizes via Cholesky.
// NOTE(review): an identical definition appears earlier in this translation
// unit. Anonymous namespaces in the same TU merge, so one of the two is a
// redefinition and should be removed — TODO confirm and delete one.
JacobianFactorUnordered::shared_ptr convertToJacobianFactorPtr(const GaussianFactorUnordered::shared_ptr &gf) {
JacobianFactorUnordered::shared_ptr result = boost::dynamic_pointer_cast<JacobianFactorUnordered>(gf);
if( !result ) {
result = boost::make_shared<JacobianFactorUnordered>(*gf); // Convert any non-Jacobian factors to Jacobians (e.g. Hessian -> Jacobian with Cholesky)
}
return result;
}
}
///* ************************************************************************* */
//Errors operator*(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x) {
// Errors e;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// e.push_back((*Ai)*x);
// }
// return e;
//}
/* ************************************************************************* */
Errors operator*(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x) {
  // Compute A*x, producing one error vector per factor.
  Errors result;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    JacobianFactorUnordered::shared_ptr jacobian = convertToJacobianFactorPtr(factor);
    result.push_back((*jacobian) * x);
  }
  return result;
}
///* ************************************************************************* */
//void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, Errors& e) {
// multiplyInPlace(fg,x,e.begin());
//}
/* ************************************************************************* */
void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, Errors& e) {
multiplyInPlace(fg,x,e.begin());
}
///* ************************************************************************* */
//void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, const Errors::iterator& e) {
// Errors::iterator ei = e;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// *ei = (*Ai)*x;
// ei++;
// }
//}
/* ************************************************************************* */
void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, const Errors::iterator& e) {
  // Write A*x factor-by-factor through the supplied output iterator; the
  // caller guarantees the destination holds one slot per factor.
  Errors::iterator out = e;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    JacobianFactorUnordered::shared_ptr jacobian = convertToJacobianFactorPtr(factor);
    *out = (*jacobian) * x;
    ++out;
  }
}
///* ************************************************************************* */
//// x += alpha*A'*e
//void transposeMultiplyAdd(const GaussianFactorGraphUnordered& fg, double alpha, const Errors& e, VectorValuesUnordered& x) {
// // For each factor add the gradient contribution
// Errors::const_iterator ei = e.begin();
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// Ai->transposeMultiplyAdd(alpha,*(ei++),x);
// }
//}
/* ************************************************************************* */
// x += alpha*A'*e
void transposeMultiplyAdd(const GaussianFactorGraphUnordered& fg, double alpha, const Errors& e, VectorValuesUnordered& x) {
  // x += alpha * A' * e: accumulate each factor's contribution
  // alpha * Ai' * ei into x, walking e in step with the factors.
  Errors::const_iterator errIt = e.begin();
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    JacobianFactorUnordered::shared_ptr jacobian = convertToJacobianFactorPtr(factor);
    jacobian->transposeMultiplyAdd(alpha, *errIt, x);
    ++errIt;
  }
}
///* ************************************************************************* */
//void residual(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r) {
// Key i = 0 ;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// r[i] = Ai->getb();
// i++;
// }
// VectorValuesUnordered Ax = VectorValuesUnordered::SameStructure(r);
// multiply(fg,x,Ax);
// axpy(-1.0,Ax,r);
//}
/* ************************************************************************* */
VectorValuesUnordered gradient(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x0) {
  // Gradient of the energy function about x0, i.e. A'*(A*x0 - b).
  // A zero-valued clone of x0 (crucial for performance) accumulates the result.
  VectorValuesUnordered grad = VectorValuesUnordered::Zero(x0);
  Errors errors;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    errors.push_back(convertToJacobianFactorPtr(factor)->error_vector(x0));
  }
  transposeMultiplyAdd(fg, 1.0, errors, grad);
  return grad;
}
///* ************************************************************************* */
//void multiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r) {
// r.setZero();
// Key i = 0;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// Vector &y = r[i];
// for(JacobianFactorUnordered::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
// y += Ai->getA(j) * x[*j];
// }
// ++i;
// }
//}
/* ************************************************************************* */
void gradientAtZero(const GaussianFactorGraphUnordered& fg, VectorValuesUnordered& g) {
// Zero-out the gradient
g.setZero();
Errors e;
BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
e.push_back(-Ai->getb());
}
transposeMultiplyAdd(fg, 1.0, e, g);
}
///* ************************************************************************* */
//void transposeMultiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &r, VectorValuesUnordered &x) {
// x.setZero();
// Key i = 0;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// for(JacobianFactorUnordered::const_iterator j = Ai->begin(); j != Ai->end(); ++j) {
// x[*j] += Ai->getA(j).transpose() * r[i];
// }
// ++i;
// }
//}
/* ************************************************************************* */
void residual(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r) {
  // r <- b - A*x: first copy each factor's b into r, then subtract A*x.
  Key idx = 0;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    r[idx] = convertToJacobianFactorPtr(factor)->getb();
    ++idx;
  }
  VectorValuesUnordered Ax = VectorValuesUnordered::SameStructure(r);
  multiply(fg, x, Ax);
  axpy(-1.0, Ax, r);
}
/* ************************************************************************* */
void multiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r) {
  // r <- A*x: for each factor i, r[i] = sum over its variables j of A_ij * x_j.
  r.setZero();
  Key idx = 0;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    JacobianFactorUnordered::shared_ptr jacobian = convertToJacobianFactorPtr(factor);
    Vector& row = r[idx];
    for(JacobianFactorUnordered::const_iterator var = jacobian->begin(); var != jacobian->end(); ++var)
      row += jacobian->getA(var) * x[*var];
    ++idx;
  }
}
/* ************************************************************************* */
void transposeMultiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &r, VectorValuesUnordered &x) {
  // x <- A'*r: scatter each factor's transposed contribution into the
  // entries of x for the variables it involves.
  x.setZero();
  Key idx = 0;
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& factor, fg) {
    JacobianFactorUnordered::shared_ptr jacobian = convertToJacobianFactorPtr(factor);
    for(JacobianFactorUnordered::const_iterator var = jacobian->begin(); var != jacobian->end(); ++var)
      x[*var] += jacobian->getA(var).transpose() * r[idx];
    ++idx;
  }
}
/* ************************************************************************* */
boost::shared_ptr<Errors> gaussianErrors_(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x) {
  // Shared-pointer variant of gaussianErrors: one error vector per factor.
  // Uses boost::make_shared instead of a raw `new` (single allocation,
  // exception-safe, and consistent with the rest of this file).
  boost::shared_ptr<Errors> e = boost::make_shared<Errors>();
  BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
    JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
    e->push_back(Ai->error_vector(x));
  }
  return e;
}
///* ************************************************************************* */
//Errors gaussianErrors(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x) {
// Errors e;
// BOOST_FOREACH(const GaussianFactorUnordered::shared_ptr& Ai_G, fg) {
// JacobianFactorUnordered::shared_ptr Ai = convertToJacobianFactorPtr(Ai_G);
// e.push_back(Ai->error_vector(x));
// }
// return e;
//}
} // namespace gtsam

View File

@ -204,18 +204,42 @@ namespace gtsam {
* @param x0 The center about which to compute the gradient
* @return The gradient as a VectorValuesUnordered
*/
VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
//VectorValuesUnordered gradient(const VectorValuesUnordered& x0) const;
/**
* Compute the gradient of the energy function,
* \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b \right\Vert^2 \f$,
* centered around zero.
* The gradient is \f$ A^T(Ax-b) \f$.
* Compute the gradient of the energy function, \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b
* \right\Vert^2 \f$, centered around zero. The gradient is \f$ A^T(Ax-b) \f$.
* @param fg The Jacobian factor graph $(A,b)$
* @param [output] g A VectorValuesUnordered to store the gradient, which must be preallocated, see allocateVectorValues
* @return The gradient as a VectorValuesUnordered
*/
VectorValuesUnordered gradientAtZero() const;
* @param [output] g A VectorValuesUnordered to store the gradient, which must be preallocated,
* see allocateVectorValues
* @return The gradient as a VectorValuesUnordered */
//VectorValuesUnordered gradientAtZero() const;
/** Optimize along the gradient direction, with a closed-form computation to perform the line
* search. The gradient is computed about \f$ \delta x=0 \f$.
*
* This function returns \f$ \delta x \f$ that minimizes a reparametrized problem. The error
* function of a GaussianBayesNet is
*
* \f[ f(\delta x) = \frac{1}{2} |R \delta x - d|^2 = \frac{1}{2}d^T d - d^T R \delta x +
* \frac{1}{2} \delta x^T R^T R \delta x \f]
*
* with gradient and Hessian
*
* \f[ g(\delta x) = R^T(R\delta x - d), \qquad G(\delta x) = R^T R. \f]
*
* This function performs the line search in the direction of the gradient evaluated at \f$ g =
* g(\delta x = 0) \f$ with step size \f$ \alpha \f$ that minimizes \f$ f(\delta x = \alpha g)
* \f$:
*
* \f[ f(\alpha) = \frac{1}{2} d^T d + g^T \delta x + \frac{1}{2} \alpha^2 g^T G g \f]
*
* Optimizing by setting the derivative to zero yields \f$ \hat \alpha = (-g^T g) / (g^T G g)
* \f$. For efficiency, this function evaluates the denominator without computing the Hessian
* \f$ G \f$, returning
*
* \f[ \delta x = \hat\alpha g = \frac{-g^T g}{(R g)^T(R g)} \f] */
//VectorValuesUnordered optimizeGradientSearch() const;
/// @}
@ -280,25 +304,25 @@ namespace gtsam {
/****** Linear Algebra Opeations ******/
/** return A*x */
GTSAM_EXPORT Errors operator*(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x);
///** return A*x */
//GTSAM_EXPORT Errors operator*(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x);
/** In-place version e <- A*x that overwrites e. */
GTSAM_EXPORT void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, Errors& e);
///** In-place version e <- A*x that overwrites e. */
//GTSAM_EXPORT void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, Errors& e);
/** In-place version e <- A*x that takes an iterator. */
GTSAM_EXPORT void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, const Errors::iterator& e);
///** In-place version e <- A*x that takes an iterator. */
//GTSAM_EXPORT void multiplyInPlace(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x, const Errors::iterator& e);
/** x += alpha*A'*e */
GTSAM_EXPORT void transposeMultiplyAdd(const GaussianFactorGraphUnordered& fg, double alpha, const Errors& e, VectorValuesUnordered& x);
///** x += alpha*A'*e */
//GTSAM_EXPORT void transposeMultiplyAdd(const GaussianFactorGraphUnordered& fg, double alpha, const Errors& e, VectorValuesUnordered& x);
/* matrix-vector operations */
GTSAM_EXPORT void residual(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r);
GTSAM_EXPORT void multiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r);
GTSAM_EXPORT void transposeMultiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &r, VectorValuesUnordered &x);
///* matrix-vector operations */
//GTSAM_EXPORT void residual(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r);
//GTSAM_EXPORT void multiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &x, VectorValuesUnordered &r);
//GTSAM_EXPORT void transposeMultiply(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered &r, VectorValuesUnordered &x);
/** return A*x-b
* \todo Make this a member function - affects SubgraphPreconditioner */
GTSAM_EXPORT Errors gaussianErrors(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x);
///** return A*x-b
// * \todo Make this a member function - affects SubgraphPreconditioner */
//GTSAM_EXPORT Errors gaussianErrors(const GaussianFactorGraphUnordered& fg, const VectorValuesUnordered& x);
} // namespace gtsam

View File

@ -1,560 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HessianFactor.cpp
* @author Richard Roberts
* @date Dec 8, 2010
*/
#include <sstream>
#include <boost/foreach.hpp>
#include <boost/format.hpp>
#include <boost/make_shared.hpp>
#include <boost/tuple/tuple.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-variable"
#endif
#include <boost/bind.hpp>
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
#include <gtsam/base/debug.h>
#include <gtsam/base/timing.h>
#include <gtsam/base/Matrix.h>
#include <gtsam/base/FastMap.h>
#include <gtsam/base/cholesky.h>
#include <gtsam/linear/linearExceptions.h>
#include <gtsam/linear/GaussianConditional.h>
#include <gtsam/linear/HessianFactor.h>
#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/GaussianFactorGraph.h>
#include <gtsam/linear/GaussianBayesNet.h>
using namespace std;
namespace gtsam {
/* ************************************************************************* */
string SlotEntry::toString() const {
  // Human-readable rendering, e.g. "SlotEntry: slot=3, dim=6".
  ostringstream stream;
  stream << "SlotEntry: slot=" << slot << ", dim=" << dimension;
  return stream.str();
}
/* ************************************************************************* */
Scatter::Scatter(const FactorGraph<GaussianFactor>& gfg) {
  // Build the union of all variables appearing in the graph, recording each
  // variable's dimension (map insert ignores repeats of the same variable).
  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, gfg) {
    if(!factor)
      continue;
    for(GaussianFactor::const_iterator variable = factor->begin(); variable != factor->end(); ++variable)
      this->insert(make_pair(*variable, SlotEntry(0, factor->getDim(variable))));
  }

  // Slot indices can only be assigned once the union is complete, so number
  // the entries in a second pass.
  size_t nextSlot = 0;
  BOOST_FOREACH(value_type& entry, *this)
    entry.second.slot = nextSlot++;
}
/* ************************************************************************* */
// Check internal invariants; delegates to the base class, which verifies that
// all keys are unique.
void HessianFactor::assertInvariants() const {
GaussianFactor::assertInvariants(); // The base class checks for unique keys
}
/* ************************************************************************* */
// Copy constructor.  info_ is a block view bound to this factor's own matrix_
// (see the member initializer), so it must be rebound here and the matrix
// data copied explicitly via assignNoalias; a default copy would leave info_
// viewing gf's storage.
HessianFactor::HessianFactor(const HessianFactor& gf) :
GaussianFactor(gf), info_(matrix_) {
info_.assignNoalias(gf.info_);
assertInvariants();
}
/* ************************************************************************* */
HessianFactor::HessianFactor() : info_(matrix_) {
  // The empty HessianFactor carries only a constant error term, fixed at
  // zero: a single 1x1 block holding f = 0.
  size_t dims[] = { 1 };
  info_.resize(dims, dims + 1, false);
  info_(0,0)(0,0) = 0.0;
  assertInvariants();
}
/* ************************************************************************* */
// Construct a unary HessianFactor on variable j from the information block G,
// linear term g, and constant f, assembled into the augmented matrix
//   [ G  g ]
//   [ g' f ]
// (only the upper triangle is used; see error() for the quadratic it encodes).
HessianFactor::HessianFactor(Index j, const Matrix& G, const Vector& g, double f) :
GaussianFactor(j), info_(matrix_) {
if(G.rows() != G.cols() || G.rows() != g.size())
throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
size_t dims[] = { G.rows(), 1 };  // one block for x_j plus the 1x1 constant block
InfoMatrix fullMatrix(G.rows() + 1, G.rows() + 1);
BlockInfo infoMatrix(fullMatrix, dims, dims+2);
infoMatrix(0,0) = G;
infoMatrix.column(0,1,0) = g;
infoMatrix(1,1)(0,0) = f;
infoMatrix.swap(info_);  // adopt the assembled matrix without copying
assertInvariants();
}
/* ************************************************************************* */
// error is 0.5*(x-mu)'*inv(Sigma)*(x-mu) = 0.5*(x'*G*x - 2*x'*G*mu + mu'*G*mu)
// where G = inv(Sigma), g = G*mu, f = mu'*G*mu = mu'*g
// Construct a unary Gaussian density on variable j with mean mu and
// covariance Sigma, converted to information form as above.
HessianFactor::HessianFactor(Index j, const Vector& mu, const Matrix& Sigma) :
GaussianFactor(j), info_(matrix_) {
if (Sigma.rows() != Sigma.cols() || Sigma.rows() != mu.size()) throw invalid_argument(
"Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
Matrix G = inverse(Sigma);  // information matrix
Vector g = G * mu;          // information vector
double f = dot(mu, g);      // constant term mu'*G*mu
size_t dims[] = { G.rows(), 1 };
InfoMatrix fullMatrix(G.rows() + 1, G.rows() + 1);
BlockInfo infoMatrix(fullMatrix, dims, dims + 2);
infoMatrix(0, 0) = G;
infoMatrix.column(0, 1, 0) = g;
infoMatrix(1, 1)(0, 0) = f;
infoMatrix.swap(info_);  // adopt the assembled matrix without copying
assertInvariants();
}
/* ************************************************************************* */
// Construct a binary HessianFactor on (j1, j2) from upper-triangular
// information blocks, assembled as
//   [ G11 G12 g1 ]
//   [  .  G22 g2 ]
//   [  .   .  f  ]
HessianFactor::HessianFactor(Index j1, Index j2,
const Matrix& G11, const Matrix& G12, const Vector& g1,
const Matrix& G22, const Vector& g2, double f) :
GaussianFactor(j1, j2), info_(matrix_) {
if(G11.rows() != G11.cols() || G11.rows() != G12.rows() || G11.rows() != g1.size() ||
G22.cols() != G12.cols() || G22.cols() != g2.size())
throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
size_t dims[] = { G11.rows(), G22.rows(), 1 };
InfoMatrix fullMatrix(G11.rows() + G22.rows() + 1, G11.rows() + G22.rows() + 1);
BlockInfo infoMatrix(fullMatrix, dims, dims+3);
infoMatrix(0,0) = G11;
infoMatrix(0,1) = G12;
infoMatrix.column(0,2,0) = g1;
infoMatrix(1,1) = G22;
infoMatrix.column(1,2,0) = g2;
infoMatrix(2,2)(0,0) = f;
infoMatrix.swap(info_);  // adopt the assembled matrix without copying
assertInvariants();
}
/* ************************************************************************* */
// Construct a ternary HessianFactor on (j1, j2, j3) from upper-triangular
// information blocks G11..G33, linear terms g1..g3, and constant f, assembled
// analogously to the binary constructor above.
HessianFactor::HessianFactor(Index j1, Index j2, Index j3,
const Matrix& G11, const Matrix& G12, const Matrix& G13, const Vector& g1,
const Matrix& G22, const Matrix& G23, const Vector& g2,
const Matrix& G33, const Vector& g3, double f) :
GaussianFactor(j1, j2, j3), info_(matrix_) {
if(G11.rows() != G11.cols() || G11.rows() != G12.rows() || G11.rows() != G13.rows() || G11.rows() != g1.size() ||
G22.cols() != G12.cols() || G33.cols() != G13.cols() || G22.cols() != g2.size() || G33.cols() != g3.size())
throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
size_t dims[] = { G11.rows(), G22.rows(), G33.rows(), 1 };
InfoMatrix fullMatrix(G11.rows() + G22.rows() + G33.rows() + 1, G11.rows() + G22.rows() + G33.rows() + 1);
BlockInfo infoMatrix(fullMatrix, dims, dims+4);
infoMatrix(0,0) = G11;
infoMatrix(0,1) = G12;
infoMatrix(0,2) = G13;
infoMatrix.column(0,3,0) = g1;
infoMatrix(1,1) = G22;
infoMatrix(1,2) = G23;
infoMatrix.column(1,3,0) = g2;
infoMatrix(2,2) = G33;
infoMatrix.column(2,3,0) = g3;
infoMatrix(3,3)(0,0) = f;
infoMatrix.swap(info_);  // adopt the assembled matrix without copying
assertInvariants();
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const std::vector<Index>& js, const std::vector<Matrix>& Gs,
    const std::vector<Vector>& gs, double f) : GaussianFactor(js), info_(matrix_) {
  // Construct an n-ary HessianFactor from the upper-triangular information
  // blocks Gs (row-major: G11, G12, ..., G1n, G22, ..., Gnn), the linear
  // terms gs, and the constant f.

  // Get the number of variables
  size_t variable_count = js.size();

  // Verify the provided number of entries in the vectors are consistent
  if(gs.size() != variable_count || Gs.size() != (variable_count*(variable_count+1))/2)
    throw invalid_argument("Inconsistent number of entries between js, Gs, and gs in HessianFactor constructor.\nThe number of keys provided \
in js must match the number of linear vector pieces in gs. The number of upper-diagonal blocks in Gs must be n*(n+1)/2");

  // Verify the dimensions of each provided matrix are consistent
  // Note: equations for calculating the indices derived from the "sum of an arithmetic sequence" formula
  for(size_t i = 0; i < variable_count; ++i){
    int block_size = gs[i].size();
    // Check rows
    for(size_t j = 0; j < variable_count-i; ++j){
      size_t index = i*(2*variable_count - i + 1)/2 + j;
      if(Gs[index].rows() != block_size){
        throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
      }
    }
    // Check cols
    for(size_t j = 0; j <= i; ++j){
      size_t index = j*(2*variable_count - j + 1)/2 + (i-j);
      if(Gs[index].cols() != block_size){
        throw invalid_argument("Inconsistent matrix and/or vector dimensions in HessianFactor constructor");
      }
    }
  }

  // Create the dims vector: one block per variable plus the 1x1 constant
  // block.  FastVector replaces the previous alloca call, which had no type
  // safety and risked stack overflow for large factors (resolves the FIXME).
  FastVector<size_t> dims(variable_count + 1);
  size_t total_size = 0;
  for(size_t i = 0; i < variable_count; ++i){
    dims[i] = gs[i].size();
    total_size += gs[i].size();
  }
  dims[variable_count] = 1;
  total_size += 1;

  // Fill in the internal matrix with the supplied blocks
  InfoMatrix fullMatrix(total_size, total_size);
  BlockInfo infoMatrix(fullMatrix, dims.begin(), dims.end());
  size_t index = 0;
  for(size_t i = 0; i < variable_count; ++i){
    for(size_t j = i; j < variable_count; ++j){
      infoMatrix(i,j) = Gs[index++];
    }
    infoMatrix.column(i,variable_count,0) = gs[i];
  }
  infoMatrix(variable_count,variable_count)(0,0) = f;

  // update the BlockView variable
  infoMatrix.swap(info_);
  assertInvariants();
}
/* ************************************************************************* */
// Construct from a GaussianConditional: form the conditional's Jacobian
// factor and square it, matrix_ = J' * J, reusing the Jacobian's block
// structure for info_.
HessianFactor::HessianFactor(const GaussianConditional& cg) : GaussianFactor(cg), info_(matrix_) {
JacobianFactor jf(cg);
info_.copyStructureFrom(jf.Ab_);
matrix_.noalias() = jf.matrix_.transpose() * jf.matrix_;
assertInvariants();
}
/* ************************************************************************* */
HessianFactor::HessianFactor(const GaussianFactor& gf) : info_(matrix_) {
  // Conversion constructor: copy the variable indices, then fill in the
  // matrix data according to the concrete type of gf.
  static_cast<GaussianFactor&>(*this) = gf;
  if(const JacobianFactor* jacobian = dynamic_cast<const JacobianFactor*>(&gf)) {
    // Jacobian source: form A' * Sigma^-1 * A from the whitened system.
    if(jacobian->model_->isConstrained())
      throw invalid_argument("Cannot construct HessianFactor from JacobianFactor with constrained noise model");
    Vector invsigmas = jacobian->model_->invsigmas().cwiseProduct(jacobian->model_->invsigmas());
    info_.copyStructureFrom(jacobian->Ab_);
    BlockInfo::constBlock A = jacobian->Ab_.full();
    matrix_.noalias() = A.transpose() * invsigmas.asDiagonal() * A;
  } else if(const HessianFactor* hessian = dynamic_cast<const HessianFactor*>(&gf)) {
    // Hessian source: straight copy of the information matrix.
    info_.assignNoalias(hessian->info_);
  } else
    throw std::invalid_argument("In HessianFactor(const GaussianFactor& gf), gf is neither a JacobianFactor nor a HessianFactor");
  assertInvariants();
}
/* ************************************************************************* */
// Combine a set of Gaussian factors (Jacobian and/or Hessian) into a single
// HessianFactor by summing their information contributions, forming Ab' * Ab.
HessianFactor::HessianFactor(const FactorGraph<GaussianFactor>& factors) : info_(matrix_)
{
Scatter scatter(factors);  // maps each variable to its slot index and dimension
// Pull out keys and dimensions
gttic(keys);
vector<size_t> dimensions(scatter.size() + 1);
BOOST_FOREACH(const Scatter::value_type& var_slot, scatter) {
dimensions[var_slot.second.slot] = var_slot.second.dimension;
}
// This is for the r.h.s. vector
dimensions.back() = 1;
gttoc(keys);
const bool debug = ISDEBUG("EliminateCholesky");
// Form Ab' * Ab
gttic(allocate);
info_.resize(dimensions.begin(), dimensions.end(), false);
// Fill in keys
keys_.resize(scatter.size());
std::transform(scatter.begin(), scatter.end(), keys_.begin(), boost::bind(&Scatter::value_type::first, ::_1));
gttoc(allocate);
gttic(zero);
matrix_.noalias() = Matrix::Zero(matrix_.rows(),matrix_.cols());
gttoc(zero);
gttic(update);
if (debug) cout << "Combining " << factors.size() << " factors" << endl;
// Accumulate each factor's information into this factor's upper triangle,
// dispatching on the concrete factor type; null factors are skipped.
BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors)
{
if(factor) {
if(shared_ptr hessian = boost::dynamic_pointer_cast<HessianFactor>(factor))
updateATA(*hessian, scatter);
else if(JacobianFactor::shared_ptr jacobianFactor = boost::dynamic_pointer_cast<JacobianFactor>(factor))
updateATA(*jacobianFactor, scatter);
else
throw invalid_argument("GaussianFactor is neither Hessian nor Jacobian");
}
}
gttoc(update);
if (debug) gtsam::print(matrix_, "Ab' * Ab: ");
assertInvariants();
}
/* ************************************************************************* */
HessianFactor& HessianFactor::operator=(const HessianFactor& rhs) {
  // Guard against self-assignment: assignNoalias, as its name implies,
  // assumes source and destination storage do not alias.
  if(this != &rhs) {
    this->Base::operator=(rhs);     // Copy keys
    info_.assignNoalias(rhs.info_); // Copy matrix and block structure
  }
  return *this;
}
/* ************************************************************************* */
// Print the factor: the header string s, each key (formatted) with its block
// dimension, then the full augmented information matrix symmetrized from the
// stored upper triangle.
void HessianFactor::print(const std::string& s, const IndexFormatter& formatter) const {
cout << s << "\n";
cout << "  keys: ";
for(const_iterator key=this->begin(); key!=this->end(); ++key)
cout << formatter(*key) << "(" << this->getDim(key) << ") ";
cout << "\n";
gtsam::print(Matrix(info_.range(0,info_.nBlocks(), 0,info_.nBlocks()).selfadjointView<Eigen::Upper>()), "Ab^T * Ab: ");
}
/* ************************************************************************* */
bool HessianFactor::equals(const GaussianFactor& lf, double tol) const {
if(!dynamic_cast<const HessianFactor*>(&lf))
return false;
else {
Matrix thisMatrix = this->info_.full().selfadjointView<Eigen::Upper>();
thisMatrix(thisMatrix.rows()-1, thisMatrix.cols()-1) = 0.0;
Matrix rhsMatrix = static_cast<const HessianFactor&>(lf).info_.full().selfadjointView<Eigen::Upper>();
rhsMatrix(rhsMatrix.rows()-1, rhsMatrix.cols()-1) = 0.0;
return equal_with_abs_tol(thisMatrix, rhsMatrix, tol);
}
}
/* ************************************************************************* */
// Return the full augmented information matrix [G g; g' f] as a dense
// symmetric matrix, mirroring the stored upper triangle.
Matrix HessianFactor::augmentedInformation() const {
return info_.full().selfadjointView<Eigen::Upper>();
}
/* ************************************************************************* */
// Return the information matrix G restricted to the variable blocks
// (excluding the augmented linear-term column and constant), mirroring the
// stored upper triangle.
Matrix HessianFactor::information() const {
return info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>();
}
/* ************************************************************************* */
// Evaluate this factor's error 0.5*(f - 2*x'*g + x'*G*x) at the values c,
// where x is the concatenation of c's entries for this factor's keys.
double HessianFactor::error(const VectorValues& c) const {
// error 0.5*(f - 2*x'*g + x'*G*x)
const double f = constantTerm();
double xtg = 0, xGx = 0;
// extract the relevant subset of the VectorValues
// NOTE may not be as efficient
const Vector x = c.vector(this->keys());
xtg = x.dot(linearTerm());  // x' * g
xGx = x.transpose() * info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>() * x;  // x' * G * x
return 0.5 * (f - 2.0 * xtg + xGx);
}
/* ************************************************************************* */
void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatter) {
// This function updates 'combined' with the information in 'update'.
// 'scatter' maps variables in the update factor to slots in the combined
// factor.
const bool debug = ISDEBUG("updateATA");
// First build an array of slots
gttic(slots);
size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
size_t slot = 0;
BOOST_FOREACH(Index j, update) {
slots[slot] = scatter.find(j)->second.slot;
++ slot;
}
gttoc(slots);
if(debug) {
this->print("Updating this: ");
update.print("with (Hessian): ");
}
// Apply updates to the upper triangle
gttic(update);
for(size_t j2=0; j2<update.info_.nBlocks(); ++j2) {
size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
for(size_t j1=0; j1<=j2; ++j1) {
size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
if(slot2 > slot1) {
if(debug)
cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).noalias() +=
update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols());
} else if(slot1 > slot2) {
if(debug)
cout << "Updating (" << slot2 << "," << slot1 << ") from (" << j1 << "," << j2 << ")" << endl;
matrix_.block(info_.offset(slot2), info_.offset(slot1), info_(slot2,slot1).rows(), info_(slot2,slot1).cols()).noalias() +=
update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols()).transpose();
} else {
if(debug)
cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).triangularView<Eigen::Upper>() +=
update.matrix_.block(update.info_.offset(j1), update.info_.offset(j2), update.info_(j1,j2).rows(), update.info_(j1,j2).cols());
}
if(debug) cout << "Updating block " << slot1 << "," << slot2 << " from block " << j1 << "," << j2 << "\n";
if(debug) this->print();
}
}
gttoc(update);
}
/* ************************************************************************* */
void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatter) {

  // This function updates 'this' (the combined factor) with the information
  // in 'update', i.e. it adds A'*inv(Sigma)*A from the Jacobian factor into
  // this factor's augmented information matrix.  'scatter' maps variable
  // indices in the update factor to block slots in this combined factor.

  const bool debug = ISDEBUG("updateATA");

  // First build an array mapping each variable block of 'update' to its slot
  // in this factor.  (Was alloca'd — see old FIXME; std::vector avoids
  // unbounded stack allocation and is exception-safe.)
  gttic(slots);
  std::vector<size_t> slots(update.size());
  size_t slot = 0;
  BOOST_FOREACH(Index j, update) {
    slots[slot] = scatter.find(j)->second.slot;
    ++ slot;
  }
  gttoc(slots);

  gttic(form_ATA);
  // Constrained (infinite-information) rows cannot be represented in
  // information (Hessian) form.
  if(update.model_->isConstrained())
    throw invalid_argument("Cannot update HessianFactor from JacobianFactor with constrained noise model");

  if(debug) {
    this->print("Updating this: ");
    update.print("with (Jacobian): ");
  }

  // Non-copying view of the update factor's full [A b] matrix
  typedef Eigen::Block<const JacobianFactor::AbMatrix> BlockUpdateMatrix;
  BlockUpdateMatrix updateA(update.matrix_.block(
      update.Ab_.rowStart(),update.Ab_.offset(0), update.Ab_.full().rows(), update.Ab_.full().cols()));
  if (debug) cout << "updateA: \n" << updateA << endl;

  // Form the update information A'*inv(Sigma)*A.  Only Unit and Diagonal
  // noise models are supported here.
  Matrix updateInform;
  if(boost::dynamic_pointer_cast<noiseModel::Unit>(update.model_)) {
    updateInform.noalias() = updateA.transpose() * updateA;
  } else {
    noiseModel::Diagonal::shared_ptr diagonal(boost::dynamic_pointer_cast<noiseModel::Diagonal>(update.model_));
    if(diagonal) {
      Vector invsigmas2 = update.model_->invsigmas().cwiseProduct(update.model_->invsigmas());
      updateInform.noalias() = updateA.transpose() * invsigmas2.asDiagonal() * updateA;
    } else
      throw invalid_argument("In HessianFactor::updateATA, JacobianFactor noise model is neither Unit nor Diagonal");
  }
  if (debug) cout << "updateInform: \n" << updateInform << endl;
  gttoc(form_ATA);

  // Apply updates to the upper triangle of this factor's information matrix.
  // Block index update.size() is the RHS (b) column of the update, which maps
  // to the last block of this factor.
  gttic(update);
  for(size_t j2=0; j2<update.Ab_.nBlocks(); ++j2) {
    size_t slot2 = (j2 == update.size()) ? this->info_.nBlocks()-1 : slots[j2];
    for(size_t j1=0; j1<=j2; ++j1) {
      size_t slot1 = (j1 == update.size()) ? this->info_.nBlocks()-1 : slots[j1];
      size_t off0 = update.Ab_.offset(0);
      if(slot2 > slot1) {
        // Destination is strictly above the diagonal: add the block as-is.
        if(debug)
          cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
        matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).noalias() +=
            updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols());
      } else if(slot1 > slot2) {
        // Destination would be below the diagonal: transpose into the stored
        // upper triangle instead.
        if(debug)
          cout << "Updating (" << slot2 << "," << slot1 << ") from (" << j1 << "," << j2 << ")" << endl;
        matrix_.block(info_.offset(slot2), info_.offset(slot1), info_(slot2,slot1).rows(), info_(slot2,slot1).cols()).noalias() +=
            updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols()).transpose();
      } else {
        // Diagonal block: only the upper-triangular part is stored/updated.
        if(debug)
          cout << "Updating (" << slot1 << "," << slot2 << ") from (" << j1 << "," << j2 << ")" << endl;
        matrix_.block(info_.offset(slot1), info_.offset(slot2), info_(slot1,slot2).rows(), info_(slot1,slot2).cols()).triangularView<Eigen::Upper>() +=
            updateInform.block(update.Ab_.offset(j1)-off0, update.Ab_.offset(j2)-off0, update.Ab_(j1).cols(), update.Ab_(j2).cols());
      }
      if(debug) cout << "Updating block " << slot1 << "," << slot2 << " from block " << j1 << "," << j2 << "\n";
      if(debug) this->print();
    }
  }
  gttoc(update);
}
/* ************************************************************************* */
void HessianFactor::partialCholesky(size_t nrFrontals) {
  // Factor the leading block (the first nrFrontals variables) in place; a
  // failure indicates the system is indeterminate at the first involved key.
  const bool success = choleskyPartial(matrix_, info_.offset(nrFrontals));
  if(!success)
    throw IndeterminantLinearSystemException(keys().front());
}
/* ************************************************************************* */
GaussianConditional::shared_ptr HessianFactor::splitEliminatedFactor(size_t nrFrontals) {
  static const bool debug = false;

  // Extract the conditional from the top rows of the (already partially
  // Cholesky-factored) augmented matrix.
  gttic(extract_conditionals);
  typedef VerticalBlockView<Matrix> BlockAb;
  BlockAb Ab(matrix_, info_);

  // Restrict the view to the rows belonging to the frontal variables
  size_t varDim = info_.offset(nrFrontals);
  Ab.rowEnd() = Ab.rowStart() + varDim;

  // Create one big conditional with many frontal variables.  (Construct the
  // shared_ptr directly — the previous code default-constructed a
  // GaussianConditional only to immediately discard it.)
  gttic(construct_cond);
  Vector sigmas = Vector::Ones(varDim);
  GaussianConditional::shared_ptr conditional =
      boost::make_shared<ConditionalType>(keys_.begin(), keys_.end(), nrFrontals, Ab, sigmas);
  gttoc(construct_cond);
  if(debug) conditional->print("Extracted conditional: ");

  gttoc(extract_conditionals);

  // Take lower-right block of Ab_ to get the remaining factor
  gttic(remaining_factor);
  info_.blockStart() = nrFrontals;
  // Assign the remaining (separator) keys directly from the range, instead of
  // sizing the vector and then overwriting it with assign()
  vector<Index> remainingKeys(keys_.begin() + nrFrontals, keys_.end());
  keys_.swap(remainingKeys);
  gttoc(remaining_factor);

  return conditional;
}
/* ************************************************************************* */
GaussianFactor::shared_ptr HessianFactor::negate() const {
  // Gather the keys, then copy and negate every upper-triangular Hessian
  // block, linear term, and the constant term, to build the anti-factor.
  std::vector<Index> js(begin(), end());
  std::vector<Matrix> Gs;
  std::vector<Vector> gs;
  const size_t n = js.size();
  for(size_t i = 0; i < n; ++i) {
    for(size_t j = i; j < n; ++j)
      Gs.push_back( -info(begin()+i, begin()+j) );
    gs.push_back( -linearTerm(begin()+i) );
  }
  const double f = -constantTerm();

  // Create the Anti-Hessian Factor from the negated blocks
  return boost::make_shared<HessianFactor>(js, Gs, gs, f);
}
} // gtsam

View File

@ -1,377 +0,0 @@
/* ----------------------------------------------------------------------------
* GTSAM Copyright 2010, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* Authors: Frank Dellaert, et al. (see THANKS for the full author list)
* See LICENSE for the license information
* -------------------------------------------------------------------------- */
/**
* @file HessianFactor.h
* @brief Contains the HessianFactor class, a general quadratic factor
* @author Richard Roberts
* @date Dec 8, 2010
*/
#pragma once
#include <gtsam/base/blockMatrices.h>
#include <gtsam/inference/FactorGraph.h>
#include <gtsam/linear/GaussianFactor.h>
// Forward declarations for friend unit tests
class HessianFactorConversionConstructorTest;
class HessianFactorConstructor1Test;
class HessianFactorConstructor1bTest;
class HessianFactorcombineTest;
namespace gtsam {
// Forward declarations
class JacobianFactor;
class GaussianConditional;
template<class C> class BayesNet;
// Definition of Scatter, which is an intermediate data structure used when
// building a HessianFactor incrementally, to get the keys in the right
// order.
// The "scatter" is a map from global variable indices to slot indices in the
// union of involved variables. We also include the dimensionality of the
// variable.
// One entry of a Scatter: for a single variable, the slot index it occupies
// in the combined factor and the variable's dimensionality.
struct GTSAM_EXPORT SlotEntry {
  size_t slot;       // block slot in the combined factor
  size_t dimension;  // dimension of the variable
  SlotEntry(size_t slot_, size_t dimension_) : slot(slot_), dimension(dimension_) {}
  std::string toString() const;
};
// A Scatter maps each global variable index to its SlotEntry (slot in the
// combined factor plus the variable's dimension).  It is an intermediate
// structure used when building a HessianFactor incrementally, to get the
// keys into the right order.
class Scatter : public FastMap<Index, SlotEntry> {
public:
Scatter() {} // empty scatter
Scatter(const FactorGraph<GaussianFactor>& gfg); // build from the variables of a factor graph
};
/**
* @brief A Gaussian factor using the canonical parameters (information form)
*
* HessianFactor implements a general quadratic factor of the form
* \f[ E(x) = 0.5 x^T G x - x^T g + 0.5 f \f]
* that stores the matrix \f$ G \f$, the vector \f$ g \f$, and the constant term \f$ f \f$.
*
* When \f$ G \f$ is positive semidefinite, this factor represents a Gaussian,
* in which case \f$ G \f$ is the information matrix \f$ \Lambda \f$,
* \f$ g \f$ is the information vector \f$ \eta \f$, and \f$ f \f$ is the residual
* sum-square-error at the mean, when \f$ x = \mu \f$.
*
* Indeed, the negative log-likelihood of a Gaussian is (up to a constant)
* @f$ E(x) = 0.5(x-\mu)^T P^{-1} (x-\mu) @f$
* with @f$ \mu @f$ the mean and @f$ P @f$ the covariance matrix. Expanding the product we get
* @f[
* E(x) = 0.5 x^T P^{-1} x - x^T P^{-1} \mu + 0.5 \mu^T P^{-1} \mu
* @f]
* We define the Information matrix (or Hessian) @f$ \Lambda = P^{-1} @f$
* and the information vector @f$ \eta = P^{-1} \mu = \Lambda \mu @f$
* to arrive at the canonical form of the Gaussian:
* @f[
* E(x) = 0.5 x^T \Lambda x - x^T \eta + 0.5 \mu^T \Lambda \mu
* @f]
*
* This factor is one of the factors that can be in a GaussianFactorGraph.
* It may be returned from NonlinearFactor::linearize(), but is also
* used internally to store the Hessian during Cholesky elimination.
*
* This can represent a quadratic factor with characteristics that cannot be
* represented using a JacobianFactor (which has the form
* \f$ E(x) = \Vert Ax - b \Vert^2 \f$ and stores the Jacobian \f$ A \f$
* and error vector \f$ b \f$, i.e. is a sum-of-squares factor). For example,
* a HessianFactor need not be positive semidefinite, it can be indefinite or
* even negative semidefinite.
*
* If a HessianFactor is indefinite or negative semi-definite, then in order
* for solving the linear system to be possible,
* the Hessian of the full system must be positive definite (i.e. when all
* small Hessians are combined, the result must be positive definite). If
* this is not the case, an error will occur during elimination.
*
* This class stores G, g, and f as an augmented matrix HessianFactor::matrix_.
* The upper-left n x n blocks of HessianFactor::matrix_ store the upper-right
* triangle of G, the upper-right-most column of length n of HessianFactor::matrix_
* stores g, and the lower-right entry of HessianFactor::matrix_ stores f, i.e.
* \code
HessianFactor::matrix_ = [ G11 G12 G13 ... g1
0 G22 G23 ... g2
0 0 G33 ... g3
: : : :
0 0 0 ... f ]
\endcode
Blocks can be accessed as follows:
\code
G11 = info(begin(), begin());
G12 = info(begin(), begin()+1);
G23 = info(begin()+1, begin()+2);
g2 = linearTerm(begin()+1);
f = constantTerm();
.......
\endcode
*/
class GTSAM_EXPORT HessianFactor : public GaussianFactor {
protected:
typedef Matrix InfoMatrix; ///< The full augmented Hessian
typedef SymmetricBlockView<InfoMatrix> BlockInfo; ///< A blockwise view of the Hessian
typedef GaussianFactor Base; ///< Typedef to base class
InfoMatrix matrix_; ///< The full augmented information matrix, s.t. the quadratic error is 0.5*[x -1]'*H*[x -1]
BlockInfo info_; ///< The block view of the full information matrix.
public:
typedef boost::shared_ptr<HessianFactor> shared_ptr; ///< A shared_ptr to this
typedef BlockInfo::Block Block; ///< A block from the Hessian matrix
typedef BlockInfo::constBlock constBlock; ///< A block from the Hessian matrix (const version)
typedef BlockInfo::Column Column; ///< A column containing the linear term h
typedef BlockInfo::constColumn constColumn; ///< A column containing the linear term h (const version)
/** Copy constructor */
HessianFactor(const HessianFactor& gf);
/** default constructor for I/O */
HessianFactor();
/** Construct a unary factor. G is the quadratic term (Hessian matrix), g
* the linear term (a vector), and f the constant term. The quadratic
* error is:
* 0.5*(f - 2*x'*g + x'*G*x)
*/
HessianFactor(Index j, const Matrix& G, const Vector& g, double f);
/** Construct a unary factor, given a mean and covariance matrix.
* error is 0.5*(x-mu)'*inv(Sigma)*(x-mu)
*/
HessianFactor(Index j, const Vector& mu, const Matrix& Sigma);
/** Construct a binary factor. Gxx are the upper-triangle blocks of the
* quadratic term (the Hessian matrix), gx the pieces of the linear vector
* term, and f the constant term.
* JacobianFactor error is \f[ 0.5* (Ax-b)' M (Ax-b) = 0.5*x'A'MAx - x'A'Mb + 0.5*b'Mb \f]
* HessianFactor error is \f[ 0.5*(x'Gx - 2x'g + f) = 0.5*x'Gx - x'*g + 0.5*f \f]
* So, with \f$ A = [A1 A2] \f$ and \f$ G=A*'M*A = [A1';A2']*M*[A1 A2] \f$ we have
\code
n1*n1 G11 = A1'*M*A1
n1*n2 G12 = A1'*M*A2
n2*n2 G22 = A2'*M*A2
n1*1 g1 = A1'*M*b
n2*1 g2 = A2'*M*b
1*1 f = b'*M*b
\endcode
*/
HessianFactor(Index j1, Index j2,
const Matrix& G11, const Matrix& G12, const Vector& g1,
const Matrix& G22, const Vector& g2, double f);
/** Construct a ternary factor. Gxx are the upper-triangle blocks of the
* quadratic term (the Hessian matrix), gx the pieces of the linear vector
* term, and f the constant term.
*/
HessianFactor(Index j1, Index j2, Index j3,
const Matrix& G11, const Matrix& G12, const Matrix& G13, const Vector& g1,
const Matrix& G22, const Matrix& G23, const Vector& g2,
const Matrix& G33, const Vector& g3, double f);
/** Construct an n-way factor. Gs contains the upper-triangle blocks of the
* quadratic term (the Hessian matrix) provided in row-order, gs the pieces
* of the linear vector term, and f the constant term.
*/
HessianFactor(const std::vector<Index>& js, const std::vector<Matrix>& Gs,
const std::vector<Vector>& gs, double f);
/** Construct from Conditional Gaussian */
explicit HessianFactor(const GaussianConditional& cg);
/** Convert from a JacobianFactor (computes A^T * A) or HessianFactor */
explicit HessianFactor(const GaussianFactor& factor);
/** Combine a set of factors into a single dense HessianFactor */
HessianFactor(const FactorGraph<GaussianFactor>& factors);
/** Destructor */
virtual ~HessianFactor() {}
/** Assignment operator */
HessianFactor& operator=(const HessianFactor& rhs);
/** Clone this HessianFactor (returned through the GaussianFactor base pointer) */
virtual GaussianFactor::shared_ptr clone() const {
return boost::static_pointer_cast<GaussianFactor>(
shared_ptr(new HessianFactor(*this)));
}
/** Print the factor for debugging and testing (implementing Testable) */
virtual void print(const std::string& s = "",
const IndexFormatter& formatter = DefaultIndexFormatter) const;
/** Compare to another factor for testing (implementing Testable) */
virtual bool equals(const GaussianFactor& lf, double tol = 1e-9) const;
/** Evaluate the factor error f(x), see above. */
virtual double error(const VectorValues& c) const; /** 0.5*[x -1]'*H*[x -1] (also see constructor documentation) */
/** Return the dimension of the variable pointed to by the given key iterator
* todo: Remove this in favor of keeping track of dimensions with variables?
* @param variable An iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
*/
virtual size_t getDim(const_iterator variable) const { return info_(variable-this->begin(), 0).rows(); }
/** Return the number of columns and rows of the Hessian matrix */
size_t rows() const { return info_.rows(); }
/**
* Construct the corresponding anti-factor to negate information
* stored in this factor.
* @return a HessianFactor with negated Hessian matrices
*/
virtual GaussianFactor::shared_ptr negate() const;
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
* above to explain that only the upper-triangular part of the information matrix is stored
* and returned by this function.
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @return A view of the requested block, not a copy.
*/
constBlock info(const_iterator j1, const_iterator j2) const { return info_(j1-begin(), j2-begin()); }
/** Return a view of the block at (j1,j2) of the <em>upper-triangular part</em> of the
* information matrix \f$ H \f$, no data is copied. See HessianFactor class documentation
* above to explain that only the upper-triangular part of the information matrix is stored
* and returned by this function.
* @param j1 Which block row to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @param j2 Which block column to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @return A view of the requested block, not a copy.
*/
Block info(iterator j1, iterator j2) { return info_(j1-begin(), j2-begin()); }
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
* as described above. See HessianFactor class documentation above to explain that only the
* upper-triangular part of the information matrix is stored and returned by this function.
*/
constBlock info() const { return info_.full(); }
/** Return the <em>upper-triangular part</em> of the full *augmented* information matrix,
* as described above. See HessianFactor class documentation above to explain that only the
* upper-triangular part of the information matrix is stored and returned by this function.
*/
Block info() { return info_.full(); }
/** Return the constant term \f$ f \f$ as described above
* @return The constant term \f$ f \f$
*/
double constantTerm() const { return info_(this->size(), this->size())(0,0); }
/** Return the constant term \f$ f \f$ as described above
* @return The constant term \f$ f \f$
*/
double& constantTerm() { return info_(this->size(), this->size())(0,0); }
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @return The linear term \f$ g \f$ */
constColumn linearTerm(const_iterator j) const { return info_.column(j-begin(), size(), 0); }
/** Return the part of linear term \f$ g \f$ as described above corresponding to the requested variable.
* @param j Which block row to get, as an iterator pointing to the slot in this factor. You can
* use, for example, begin() + 2 to get the 3rd variable in this factor.
* @return The linear term \f$ g \f$ */
Column linearTerm(iterator j) { return info_.column(j-begin(), size(), 0); }
/** Return the complete linear term \f$ g \f$ as described above.
* @return The linear term \f$ g \f$ */
constColumn linearTerm() const { return info_.rangeColumn(0, this->size(), this->size(), 0); }
/** Return the complete linear term \f$ g \f$ as described above.
* @return The linear term \f$ g \f$ */
Column linearTerm() { return info_.rangeColumn(0, this->size(), this->size(), 0); }
/** Return the augmented information matrix represented by this GaussianFactor.
* The augmented information matrix contains the information matrix with an
* additional column holding the information vector, and an additional row
* holding the transpose of the information vector. The lower-right entry
* contains the constant error term (when \f$ \delta x = 0 \f$). The
* augmented information matrix is described in more detail in HessianFactor,
* which in fact stores an augmented information matrix.
*
* For HessianFactor, this is the same as info() except that this function
* returns a complete symmetric matrix whereas info() returns a matrix where
* only the upper triangle is valid, but should be interpreted as symmetric.
* This is because info() returns only a reference to the internal
* representation of the augmented information matrix, which stores only the
* upper triangle.
*/
virtual Matrix augmentedInformation() const;
/** Return the non-augmented information matrix represented by this
* GaussianFactor.
*/
virtual Matrix information() const;
// Friend unit test classes
friend class ::HessianFactorConversionConstructorTest;
friend class ::HessianFactorConstructor1Test;
friend class ::HessianFactorConstructor1bTest;
friend class ::HessianFactorcombineTest;
// Friend JacobianFactor for conversion
friend class JacobianFactor;
// used in eliminateCholesky:
/**
* Do Cholesky. Note that after this, the lower triangle still contains
* some untouched non-zeros that should be zero. We zero them while
* extracting submatrices in splitEliminatedFactor. Frank says :-(
*/
void partialCholesky(size_t nrFrontals);
/** split partially eliminated factor */
boost::shared_ptr<GaussianConditional> splitEliminatedFactor(size_t nrFrontals);
/** Update the factor by adding the information from the JacobianFactor
* (used internally during elimination).
* @param update The JacobianFactor containing the new information to add
* @param scatter A mapping from variable index to slot index in this HessianFactor
*/
void updateATA(const JacobianFactor& update, const Scatter& scatter);
/** Update the factor by adding the information from the HessianFactor
* (used internally during elimination).
* @param update The HessianFactor containing the new information to add
* @param scatter A mapping from variable index to slot index in this HessianFactor
*/
void updateATA(const HessianFactor& update, const Scatter& scatter);
/** assert invariants */
void assertInvariants() const;
private:
/** Serialization function */
friend class boost::serialization::access;
template<class ARCHIVE>
void serialize(ARCHIVE & ar, const unsigned int version) {
ar & BOOST_SERIALIZATION_BASE_OBJECT_NVP(GaussianFactor);
ar & BOOST_SERIALIZATION_NVP(info_);
ar & BOOST_SERIALIZATION_NVP(matrix_);
}
};
}

View File

@ -20,7 +20,7 @@
#include <gtsam/linear/linearExceptions.h>
#include <gtsam/linear/GaussianConditionalUnordered.h>
#include <gtsam/linear/JacobianFactorUnordered.h>
#include <gtsam/linear/HessianFactorUnordered.h>
//#include <gtsam/linear/HessianFactorUnordered.h>
#include <gtsam/linear/GaussianFactorGraphUnordered.h>
#include <gtsam/linear/VectorValuesUnordered.h>
#include <gtsam/inference/VariableSlots.h>

View File

@ -213,5 +213,12 @@ namespace gtsam {
}
/* ************************************************************************* */
// Scale every vector in x by alpha, in place (the BLAS-style 'scal' operation).
void scal(double alpha, VectorValuesUnordered& x)
{
// map_values (Boost.Range adaptor) visits the Vector of each (key, Vector) pair
BOOST_FOREACH(Vector& v, x | map_values)
v *= alpha;
}
/* ************************************************************************* */
} // \namespace gtsam

View File

@ -253,12 +253,10 @@ namespace gtsam {
// return result;
//}
///// TODO: linear algebra interface seems to have been added for SPCG.
//friend void scal(double alpha, VectorValuesUnordered& x) {
// for(Key j = 0; j < x.size(); ++j)
// x.values_[j] *= alpha;
//}
///// TODO: linear algebra interface seems to have been added for SPCG.
// TODO: linear algebra interface seems to have been added for SPCG.
friend void scal(double alpha, VectorValuesUnordered& x);
//// TODO: linear algebra interface seems to have been added for SPCG.
//friend void axpy(double alpha, const VectorValuesUnordered& x, VectorValuesUnordered& y) {
// if(x.size() != y.size())
// throw std::invalid_argument("axpy(VectorValues) called with different vector sizes");
@ -268,13 +266,13 @@ namespace gtsam {
// else
// throw std::invalid_argument("axpy(VectorValues) called with different vector sizes");
//}
///// TODO: linear algebra interface seems to have been added for SPCG.
//// TODO: linear algebra interface seems to have been added for SPCG.
//friend void sqrt(VectorValuesUnordered &x) {
// for(Key j = 0; j < x.size(); ++j)
// x.values_[j] = x.values_[j].cwiseSqrt();
//}
///// TODO: linear algebra interface seems to have been added for SPCG.
//// TODO: linear algebra interface seems to have been added for SPCG.
//friend void ediv(const VectorValuesUnordered& numerator, const VectorValuesUnordered& denominator, VectorValuesUnordered &result) {
// if(numerator.size() != denominator.size() || numerator.size() != result.size())
// throw std::invalid_argument("ediv(VectorValues) called with different vector sizes");
@ -285,7 +283,7 @@ namespace gtsam {
// throw std::invalid_argument("ediv(VectorValues) called with different vector sizes");
//}
///// TODO: linear algebra interface seems to have been added for SPCG.
//// TODO: linear algebra interface seems to have been added for SPCG.
//friend void edivInPlace(VectorValuesUnordered& x, const VectorValuesUnordered& y) {
// if(x.size() != y.size())
// throw std::invalid_argument("edivInPlace(VectorValues) called with different vector sizes");