Refactored synchronization code for Concurrent Batch Smoother
parent e4b452473e
commit dce575cb73

@ -42,7 +42,6 @@ bool ConcurrentBatchSmoother::equals(const ConcurrentSmoother& rhs, double tol)
&& theta_.equals(smoother->theta_)
&& ordering_.equals(smoother->ordering_)
&& delta_.equals(smoother->delta_)
&& variableIndex_.equals(smoother->variableIndex_)
&& separatorValues_.equals(smoother->separatorValues_);
}

@ -56,22 +55,24 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF

// Update all of the internal variables with the new information
gttic(augment_system);
// Add the new variables to theta
theta_.insert(newTheta);
// Add new variables to the end of the ordering
std::vector<size_t> dims;
dims.reserve(newTheta.size());
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
ordering_.push_back(key_value.key);
dims.push_back(key_value.value.dim());
{
// Add the new variables to theta
theta_.insert(newTheta);
// Add new variables to the end of the ordering
std::vector<size_t> dims;
dims.reserve(newTheta.size());
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
ordering_.push_back(key_value.key);
dims.push_back(key_value.value.dim());
}
// Augment Delta
delta_.append(dims);
for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
delta_[i].setZero();
}
// Add the new factors to the graph, updating the variable index
insertFactors(newFactors);
}
// Augment Delta
delta_.append(dims);
for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
delta_[i].setZero();
}
// Add the new factors to the graph, updating the variable index
insertFactors(newFactors);
gttoc(augment_system);

// Reorder the system to ensure efficient optimization (and marginalization) performance

@ -87,181 +88,17 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
gttoc(optimize);

// Moved presync code into the update function. Generally, only one call to smoother.update(*) is performed
// between synchronizations, so no extra work is being done. This also allows the presync code to be performed
// while the filter is still running (instead of during the synchronization when the filter is paused)
gttic(presync);
if(separatorValues_.size() > 0) {
updateSmootherSummarization();
}
gttoc(presync);

gttoc(update);
// gttic(update);
|
||||
//
|
||||
// // Create result structure
|
||||
// Result result;
|
||||
//
|
||||
// gttic(augment_system);
|
||||
// // Add the new factors to the graph
|
||||
// BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, newFactors) {
|
||||
// insertFactor(factor);
|
||||
// }
|
||||
// // Add the new variables to theta
|
||||
// theta_.insert(newTheta);
|
||||
// gttoc(augment_system);
|
||||
//
|
||||
// // Optimize the graph, updating theta
|
||||
// gttic(optimize);
|
||||
// if(graph_.size() > 0){
|
||||
// optimize();
|
||||
//
|
||||
// // TODO: fill in the results structure properly
|
||||
// result.iterations = 0;
|
||||
// result.nonlinearVariables = theta_.size() - separatorValues_.size();
|
||||
// result.linearVariables = separatorValues_.size();
|
||||
// result.error = 0;
|
||||
// }
|
||||
// gttoc(optimize);
|
||||
//
|
||||
// // Move all of the Pre-Sync code to the end of the update. This allows the smoother to perform these
|
||||
// // calculations while the filter is still running
|
||||
// gttic(presync);
|
||||
// // Calculate and store the information passed up to the separator. This requires:
|
||||
// // 1) Calculate an ordering that forces the separator variables to be in the root
|
||||
// // 2) Eliminate all of the variables except the root. This produces one or more
|
||||
// // linear, marginal factors on the separator variables.
|
||||
// // 3) Convert the marginal factors into nonlinear ones using the 'LinearContainerFactor'
|
||||
//
|
||||
// if(separatorValues_.size() > 0) {
|
||||
// // Force variables associated with root keys to keep the same linearization point
|
||||
// gttic(enforce_consistency);
|
||||
// Values linpoint;
|
||||
// linpoint.insert(theta_);
|
||||
// linpoint.insert(separatorValues_);
|
||||
//
|
||||
////linpoint.print("ConcurrentBatchSmoother::presync() LinPoint:\n");
|
||||
//
|
||||
// gttoc(enforce_consistency);
|
||||
//
|
||||
// // Calculate a root-constrained ordering
|
||||
// gttic(compute_ordering);
|
||||
// std::map<Key, int> constraints;
|
||||
// BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) {
|
||||
// constraints[key_value.key] = 1;
|
||||
// }
|
||||
// Ordering ordering = *graph_.orderingCOLAMDConstrained(linpoint, constraints);
|
||||
// gttoc(compute_ordering);
|
||||
//
|
||||
// // Create a Bayes Tree using iSAM2 cliques
|
||||
// gttic(create_bayes_tree);
|
||||
// JunctionTree<GaussianFactorGraph, ISAM2Clique> jt(*graph_.linearize(linpoint, ordering));
|
||||
// ISAM2Clique::shared_ptr root = jt.eliminate(parameters_.getEliminationFunction());
|
||||
// BayesTree<GaussianConditional, ISAM2Clique> bayesTree;
|
||||
// bayesTree.insert(root);
|
||||
// gttoc(create_bayes_tree);
|
||||
//
|
||||
////ordering.print("ConcurrentBatchSmoother::presync() Ordering:\n");
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Root Keys: "; BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) { std::cout << DefaultKeyFormatter(key_value.key) << " "; } std::cout << std::endl;
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Bayes Tree:" << std::endl;
|
||||
////SymbolicPrintTree(root, ordering, " ");
|
||||
//
|
||||
// // Extract the marginal factors from the smoother
|
||||
// // For any non-filter factor that involves a root variable,
|
||||
// // calculate its marginal on the root variables using the
|
||||
// // current linearization point.
|
||||
//
|
||||
// // Find all of the smoother branches as the children of root cliques that are not also root cliques
|
||||
// gttic(find_smoother_branches);
|
||||
// std::set<ISAM2Clique::shared_ptr> rootCliques;
|
||||
// std::set<ISAM2Clique::shared_ptr> smootherBranches;
|
||||
// BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) {
|
||||
// const ISAM2Clique::shared_ptr& clique = bayesTree.nodes().at(ordering.at(key_value.key));
|
||||
// if(clique) {
|
||||
// rootCliques.insert(clique);
|
||||
// smootherBranches.insert(clique->children().begin(), clique->children().end());
|
||||
// }
|
||||
// }
|
||||
// BOOST_FOREACH(const ISAM2Clique::shared_ptr& rootClique, rootCliques) {
|
||||
// smootherBranches.erase(rootClique);
|
||||
// }
|
||||
// gttoc(find_smoother_branches);
|
||||
//
|
||||
// // Extract the cached factors on the root cliques from the smoother branches
|
||||
// gttic(extract_cached_factors);
|
||||
// GaussianFactorGraph cachedFactors;
|
||||
// BOOST_FOREACH(const ISAM2Clique::shared_ptr& clique, smootherBranches) {
|
||||
// cachedFactors.push_back(clique->cachedFactor());
|
||||
// }
|
||||
// gttoc(extract_cached_factors);
|
||||
//
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Cached Factors Before:" << std::endl;
|
||||
//BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, cachedFactors) {
|
||||
// std::cout << " g( ";
|
||||
// BOOST_FOREACH(Index index, factor->keys()) {
|
||||
// std::cout << DefaultKeyFormatter(ordering.key(index)) << " ";
|
||||
// }
|
||||
// std::cout << ")" << std::endl;
|
||||
//}
|
||||
//
|
||||
// // Marginalize out any additional (non-root) variables
|
||||
// gttic(marginalize_extra_variables);
|
||||
// // The rootKeys have been ordered last, so their linear indices will be { linpoint.size()-rootKeys.size() :: linpoint.size()-1 }
|
||||
// Index minRootIndex = linpoint.size() - separatorValues_.size();
|
||||
// // Calculate the set of keys to be marginalized
|
||||
// FastSet<Index> cachedIndices = cachedFactors.keys();
|
||||
// std::vector<Index> marginalizeIndices;
|
||||
// std::remove_copy_if(cachedIndices.begin(), cachedIndices.end(), std::back_inserter(marginalizeIndices), boost::lambda::_1 >= minRootIndex);
|
||||
//
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Marginalize Keys: ";
|
||||
//BOOST_FOREACH(Index index, marginalizeIndices) { std::cout << DefaultKeyFormatter(ordering.key(index)) << " "; }
|
||||
//std::cout << std::endl;
|
||||
//
|
||||
// // If non-root-keys are present, marginalize them out
|
||||
// if(marginalizeIndices.size() > 0) {
|
||||
// // Eliminate the extra variables, storing the remaining factors back into the 'cachedFactors' graph
|
||||
// GaussianConditional::shared_ptr conditional;
|
||||
// boost::tie(conditional, cachedFactors) = cachedFactors.eliminate(marginalizeIndices, parameters_.getEliminationFunction());
|
||||
// }
|
||||
// gttoc(marginalize_extra_variables);
|
||||
//
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Cached Factors After:" << std::endl;
|
||||
//BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, cachedFactors) {
|
||||
// std::cout << " g( ";
|
||||
// BOOST_FOREACH(Index index, factor->keys()) {
|
||||
// std::cout << DefaultKeyFormatter(ordering.key(index)) << " ";
|
||||
// }
|
||||
// std::cout << ")" << std::endl;
|
||||
//}
|
||||
//
|
||||
// // Convert factors into 'Linearized' nonlinear factors
|
||||
// gttic(store_cached_factors);
|
||||
// smootherSummarization_.resize(0);
|
||||
// BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, cachedFactors) {
|
||||
// LinearContainerFactor::shared_ptr factor(new LinearContainerFactor(gaussianFactor, ordering, linpoint));
|
||||
// smootherSummarization_.push_back(factor);
|
||||
// }
|
||||
// gttoc(store_cached_factors);
|
||||
//
|
||||
//std::cout << "ConcurrentBatchSmoother::presync() Smoother Summarization:" << std::endl;
|
||||
//BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, smootherSummarization_) {
|
||||
// std::cout << " f( ";
|
||||
// BOOST_FOREACH(Key key, factor->keys()) {
|
||||
// std::cout << DefaultKeyFormatter(key) << " ";
|
||||
// }
|
||||
// std::cout << ")" << std::endl;
|
||||
//}
|
||||
//
|
||||
// }
|
||||
// gttoc(presync);
|
||||
//
|
||||
// gttoc(update);
|
||||

return result;
}

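The comment in update() above notes that the presync work (computing the smoother summarization on the separator) is now done inside update(), so the later synchronization with the filter only has to hand over cached results. Below is a minimal sketch of that handshake, using the same calls exercised by the synchronize test further down; the driver code and the objects it assumes (a constructed smoother, newFactors, newTheta, and the containers) are illustrative, not part of this commit:

// Illustrative driver only; 'smoother', 'newFactors' and 'newTheta' are assumed to exist.
NonlinearFactorGraph smootherFactors, filterSummarization, smootherSummarization;
Values smootherValues, rootValues;

smoother.update(newFactors, newTheta);                 // heavy work, including the presync step, happens here
// ... later, while the filter is paused only briefly:
smoother.presync();
smoother.getSummarizedFactors(smootherSummarization);  // cheap: the factors were cached during update()
smoother.synchronize(smootherFactors, smootherValues, filterSummarization, rootValues);
smoother.postsync();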
@ -366,9 +203,6 @@ std::vector<size_t> ConcurrentBatchSmoother::insertFactors(const NonlinearFactor
slots.push_back(slot);
}

// Augment the Variable Index
variableIndex_.augment(*factors.symbolic(ordering_));

gttoc(insert_factors);

return slots;

@ -392,15 +226,15 @@ void ConcurrentBatchSmoother::removeFactors(const std::vector<size_t>& slots) {
availableSlots_.push(slot);
}

// Remove references to this factor from the VariableIndex
variableIndex_.remove(slots, factors);

gttoc(remove_factors);
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::reorder() {

// Recalculate the variable index
variableIndex_ = VariableIndex(*factors_.symbolic(ordering_));

// Initialize all variables to group0
std::vector<int> cmember(variableIndex_.size(), 0);

@ -529,6 +363,48 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
return result;
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::updateSmootherSummarization() {

// The smoother summarization factors are the marginal factors on the separator variables
// that result from marginalizing out all of the other variables.
// These marginal factors will be cached for later transmission to the filter using
// linear container factors.
// Note: This assumes the ordering already has the separator variables eliminated last.

// Clear out any existing smoother summarized factors
smootherSummarization_.resize(0);

// Create the linear factor graph
gtsam::GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_, ordering_);

// Construct an elimination tree to perform sparse elimination
std::vector<EliminationForest::shared_ptr> forest( EliminationForest::Create(linearFactorGraph, variableIndex_) );

// This is a forest. Only the top-most node/index of each tree needs to be eliminated; all of the children will be eliminated automatically.
// Find the subset of nodes/keys that must be eliminated
std::set<gtsam::Index> indicesToEliminate;
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, theta_) {
indicesToEliminate.insert(ordering_.at(key_value.key));
}
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) {
indicesToEliminate.erase(ordering_.at(key_value.key));
}
std::vector<Index> indices(indicesToEliminate.begin(), indicesToEliminate.end());
BOOST_FOREACH(Index index, indices) {
EliminationForest::removeChildrenIndices(indicesToEliminate, forest.at(index));
}

// Eliminate each top-most key, returning a Gaussian factor on some of the remaining variables.
// Convert the marginal factors into Linear Container Factors and store them.
BOOST_FOREACH(gtsam::Index index, indicesToEliminate) {
GaussianFactor::shared_ptr gaussianFactor = forest.at(index)->eliminateRecursive(parameters_.getEliminationFunction());
LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering_, theta_));
smootherSummarization_.push_back(marginalFactor);
}

}

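For reference, the marginal that each of these linear container factors caches is the standard Gaussian marginalization in information form: if the linearized system over the non-separator variables x_r and the separator variables x_s has information-matrix blocks \Lambda_{rr}, \Lambda_{rs}, \Lambda_{ss} and information vectors \eta_r, \eta_s, then eliminating x_r leaves the Schur complement on the separator. This is a standard identity stated only as a reminder; the symbols are not taken from the code:

\[ \tilde{\Lambda}_{ss} = \Lambda_{ss} - \Lambda_{sr}\,\Lambda_{rr}^{-1}\,\Lambda_{rs}, \qquad \tilde{\eta}_{s} = \eta_{s} - \Lambda_{sr}\,\Lambda_{rr}^{-1}\,\eta_{r}. \]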
/* ************************************************************************* */
void ConcurrentBatchSmoother::PrintNonlinearFactor(const NonlinearFactor::shared_ptr& factor, const std::string& indent, const KeyFormatter& keyFormatter) {
std::cout << indent;

@ -561,6 +437,96 @@ void ConcurrentBatchSmoother::PrintLinearFactor(const GaussianFactor::shared_ptr
}
}

/* ************************************************************************* */
std::vector<Index> ConcurrentBatchSmoother::EliminationForest::ComputeParents(const VariableIndex& structure) {
// Number of factors and variables
const size_t m = structure.nFactors();
const size_t n = structure.size();

static const gtsam::Index none = std::numeric_limits<gtsam::Index>::max();

// Allocate result parent vector and vector of last factor columns
std::vector<gtsam::Index> parents(n, none);
std::vector<gtsam::Index> prevCol(m, none);

// for column j \in 1 to n do
for (gtsam::Index j = 0; j < n; j++) {
// for row i \in Struct[A*j] do
BOOST_FOREACH(const size_t i, structure[j]) {
if (prevCol[i] != none) {
gtsam::Index k = prevCol[i];
// find root r of the current tree that contains k
gtsam::Index r = k;
while (parents[r] != none)
r = parents[r];
if (r != j) parents[r] = j;
}
prevCol[i] = j;
}
}

return parents;
}

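To make the parent-pointer pass above concrete, here is a small self-contained sketch that runs the same loop on a toy problem with three variables and two factors, {x0, x1} and {x1, x2}. It is illustrative only (plain standard-library containers rather than the gtsam types) and prints the elimination-tree chain x0 -> x1 -> x2:

// Standalone illustration of the ComputeParents() loop on a toy sparsity pattern (not GTSAM code).
#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

int main() {
  const std::size_t none = std::numeric_limits<std::size_t>::max();

  // structure[j] lists the factors (rows) that involve variable (column) j:
  // factor 0 = {x0, x1}, factor 1 = {x1, x2}
  std::vector<std::vector<std::size_t> > structure(3);
  structure[0].push_back(0);
  structure[1].push_back(0);
  structure[1].push_back(1);
  structure[2].push_back(1);

  std::vector<std::size_t> parents(3, none);
  std::vector<std::size_t> prevCol(2, none);

  for (std::size_t j = 0; j < structure.size(); ++j) {
    for (std::size_t t = 0; t < structure[j].size(); ++t) {
      const std::size_t i = structure[j][t];
      if (prevCol[i] != none) {
        // find the root of the tree that currently contains prevCol[i]
        std::size_t r = prevCol[i];
        while (parents[r] != none) r = parents[r];
        if (r != j) parents[r] = j;
      }
      prevCol[i] = j;
    }
  }

  // Prints: parent(0)=1 parent(1)=2 parent(2)=none
  for (std::size_t j = 0; j < parents.size(); ++j) {
    if (parents[j] == none) std::cout << "parent(" << j << ")=none ";
    else std::cout << "parent(" << j << ")=" << parents[j] << " ";
  }
  std::cout << std::endl;
  return 0;
}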
/* ************************************************************************* */
std::vector<ConcurrentBatchSmoother::EliminationForest::shared_ptr> ConcurrentBatchSmoother::EliminationForest::Create(const gtsam::GaussianFactorGraph& factorGraph, const gtsam::VariableIndex& structure) {
// Compute the tree structure
std::vector<gtsam::Index> parents(ComputeParents(structure));

// Number of variables
const size_t n = structure.size();

static const gtsam::Index none = std::numeric_limits<gtsam::Index>::max();

// Create tree structure
std::vector<shared_ptr> trees(n);
for (gtsam::Index k = 1; k <= n; k++) {
gtsam::Index j = n - k; // Start at the last variable and loop down to 0
trees[j].reset(new EliminationForest(j)); // Create a new node on this variable
if (parents[j] != none) // If this node has a parent, add it to the parent's children
trees[parents[j]]->add(trees[j]);
}

// Hang factors in right places
BOOST_FOREACH(const sharedFactor& factor, factorGraph) {
if(factor && factor->size() > 0) {
gtsam::Index j = *std::min_element(factor->begin(), factor->end());
if(j < structure.size())
trees[j]->add(factor);
}
}

return trees;
}

/* ************************************************************************* */
ConcurrentBatchSmoother::EliminationForest::sharedFactor ConcurrentBatchSmoother::EliminationForest::eliminateRecursive(Eliminate function) {

// Create the list of factors to be eliminated, initially empty, and reserve space
gtsam::GaussianFactorGraph factors;
factors.reserve(this->factors_.size() + this->subTrees_.size());

// Add all factors associated with the current node
factors.push_back(this->factors_.begin(), this->factors_.end());

// for all subtrees, eliminate into Bayes net and a separator factor, added to [factors]
BOOST_FOREACH(const shared_ptr& child, subTrees_)
factors.push_back(child->eliminateRecursive(function));

// Combine all factors (from this node and from subtrees) into a joint factor
gtsam::GaussianFactorGraph::EliminationResult eliminated(function(factors, 1));

return eliminated.second;
}

/* ************************************************************************* */
void ConcurrentBatchSmoother::EliminationForest::removeChildrenIndices(std::set<Index>& indices, const ConcurrentBatchSmoother::EliminationForest::shared_ptr& tree) {
BOOST_FOREACH(const EliminationForest::shared_ptr& child, tree->children()) {
indices.erase(child->key());
removeChildrenIndices(indices, child);
}
}

@ -76,16 +76,11 @@ public:
return delta_;
}

/** Access the nonlinear variable index */
const VariableIndex& getVariableIndex() const {
return variableIndex_;
}

/** Compute the current best estimate of all variables and return a full Values structure.
* If only a single variable is needed, it may be faster to call calculateEstimate(const KEY&).
*/
Values calculateEstimate() const {
return getLinearizationPoint().retract(getDelta(), getOrdering());
return theta_.retract(delta_, ordering_);
}

/** Compute the current best estimate of a single variable. This is generally faster than

@ -95,9 +90,9 @@ public:
*/
template<class VALUE>
VALUE calculateEstimate(Key key) const {
const Index index = getOrdering().at(key);
const Vector delta = getDelta().at(index);
return getLinearizationPoint().at<VALUE>(key).retract(delta);
const Index index = ordering_.at(key);
const Vector delta = delta_.at(index);
return theta_.at<VALUE>(key).retract(delta);
}

/**

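A short usage sketch of the two estimate accessors above. The constructor call and the two-argument update() are assumptions inferred from the implementation and tests elsewhere in this change, so treat this as illustrative rather than canonical API documentation:

// Hypothetical usage; mirrors the estimate accessors declared above.
LevenbergMarquardtParams parameters;
ConcurrentBatchSmoother smoother(parameters);          // constructor signature assumed
smoother.update(newFactors, newTheta);                 // add new factors/values, then optimize

Values fullEstimate = smoother.calculateEstimate();    // theta_.retract(delta_, ordering_)
Pose3 x1 = smoother.calculateEstimate<Pose3>(Symbol('X', 1));  // single-variable shortcut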
@ -164,6 +159,8 @@ protected:
*/
virtual void postsync();

private:

/** Augment the graph with new factors
*
* @param factors The factors to add to the graph

@ -183,15 +180,71 @@ protected:
/** Use a modified version of L-M to update the linearization point and delta */
Result optimize();

private:
/** Some printing functions for debugging */
/** Calculate the smoother marginal factors on the separator variables */
void updateSmootherSummarization();

/** Print just the nonlinear keys in a nonlinear factor */
static void PrintNonlinearFactor(const NonlinearFactor::shared_ptr& factor,
const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

/** Print just the nonlinear keys in a linear factor */
static void PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering,
const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

// A custom elimination tree that supports forests and partial elimination
|
||||
class EliminationForest {
|
||||
public:
|
||||
typedef boost::shared_ptr<EliminationForest> shared_ptr; ///< Shared pointer to this class
|
||||
typedef gtsam::GaussianFactor Factor; ///< The factor Type
|
||||
typedef Factor::shared_ptr sharedFactor; ///< Shared pointer to a factor
|
||||
typedef gtsam::BayesNet<Factor::ConditionalType> BayesNet; ///< The BayesNet
|
||||
typedef gtsam::GaussianFactorGraph::Eliminate Eliminate; ///< The eliminate subroutine
|
||||
|
||||
private:
|
||||
typedef gtsam::FastList<sharedFactor> Factors;
|
||||
typedef gtsam::FastList<shared_ptr> SubTrees;
|
||||
typedef std::vector<Factor::ConditionalType::shared_ptr> Conditionals;
|
||||
|
||||
gtsam::Index key_; ///< index associated with root
|
||||
Factors factors_; ///< factors associated with root
|
||||
SubTrees subTrees_; ///< sub-trees
|
||||
|
||||
/** default constructor, private, as you should use Create below */
|
||||
EliminationForest(gtsam::Index key = 0) : key_(key) {}
|
||||
|
||||
/**
|
||||
* Static internal function to build a vector of parent pointers using the
|
||||
* algorithm of Gilbert et al., 2001, BIT.
|
||||
*/
|
||||
static std::vector<gtsam::Index> ComputeParents(const gtsam::VariableIndex& structure);
|
||||
|
||||
/** add a factor, for Create use only */
|
||||
void add(const sharedFactor& factor) { factors_.push_back(factor); }
|
||||
|
||||
/** add a subtree, for Create use only */
|
||||
void add(const shared_ptr& child) { subTrees_.push_back(child); }
|
||||
|
||||
public:
|
||||
|
||||
/** return the key associated with this tree node */
|
||||
gtsam::Index key() const { return key_; }
|
||||
|
||||
/** return the const reference of children */
|
||||
const SubTrees& children() const { return subTrees_; }
|
||||
|
||||
/** return the const reference to the factors */
|
||||
const Factors& factors() const { return factors_; }
|
||||
|
||||
/** Create an elimination tree from a factor graph */
|
||||
static std::vector<shared_ptr> Create(const gtsam::GaussianFactorGraph& factorGraph, const gtsam::VariableIndex& structure);
|
||||
|
||||
/** Recursive routine that eliminates the factors arranged in an elimination tree */
|
||||
sharedFactor eliminateRecursive(Eliminate function);
|
||||
|
||||
/** Recursive function that helps find the top of each tree */
|
||||
static void removeChildrenIndices(std::set<Index>& indices, const EliminationForest::shared_ptr& tree);
|
||||
};
|
||||
|
||||
}; // ConcurrentBatchSmoother
|
||||
|
||||
}/// namespace gtsam
|
||||
|
|
|
@ -19,12 +19,15 @@
#include <gtsam_unstable/nonlinear/ConcurrentBatchSmoother.h>
#include <gtsam/slam/PriorFactor.h>
#include <gtsam/slam/BetweenFactor.h>
#include <gtsam/nonlinear/ISAM2.h>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
#include <gtsam/nonlinear/LinearContainerFactor.h>
#include <gtsam/nonlinear/Ordering.h>
#include <gtsam/nonlinear/Values.h>
#include <gtsam/nonlinear/Symbol.h>
#include <gtsam/nonlinear/Key.h>
#include <gtsam/inference/JunctionTree.h>
#include <gtsam/geometry/Pose3.h>
#include <CppUnitLite/TestHarness.h>

@ -362,60 +365,38 @@ Values BatchOptimize(const NonlinearFactorGraph& graph, const Values& theta, con
return optimizer.values();
}

///* ************************************************************************* */
|
||||
//void FindFactorsWithAny(const std::set<Key>& keys, const NonlinearFactorGraph& sourceFactors, NonlinearFactorGraph& destinationFactors) {
|
||||
//
|
||||
// BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, sourceFactors) {
|
||||
// NonlinearFactor::const_iterator key = factor->begin();
|
||||
// while((key != factor->end()) && (!std::binary_search(keys.begin(), keys.end(), *key))) {
|
||||
// ++key;
|
||||
// }
|
||||
// if(key != factor->end()) {
|
||||
// destinationFactors.push_back(factor);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
//}
|
||||
//
|
||||
///* ************************************************************************* */
|
||||
//void FindFactorsWithOnly(const std::set<Key>& keys, const NonlinearFactorGraph& sourceFactors, NonlinearFactorGraph& destinationFactors) {
|
||||
//
|
||||
// BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, sourceFactors) {
|
||||
// NonlinearFactor::const_iterator key = factor->begin();
|
||||
// while((key != factor->end()) && (std::binary_search(keys.begin(), keys.end(), *key))) {
|
||||
// ++key;
|
||||
// }
|
||||
// if(key == factor->end()) {
|
||||
// destinationFactors.push_back(factor);
|
||||
// }
|
||||
// }
|
||||
//
|
||||
//}
|
||||
//
|
||||
///* ************************************************************************* */
|
||||
//typedef BayesTree<GaussianConditional,ISAM2Clique>::sharedClique Clique;
|
||||
//void SymbolicPrintTree(const Clique& clique, const Ordering& ordering, const std::string indent = "") {
|
||||
// std::cout << indent << "P( ";
|
||||
// BOOST_FOREACH(Index index, clique->conditional()->frontals()){
|
||||
// std::cout << DefaultKeyFormatter(ordering.key(index)) << " ";
|
||||
// }
|
||||
// if(clique->conditional()->nrParents() > 0) {
|
||||
// std::cout << "| ";
|
||||
// }
|
||||
// BOOST_FOREACH(Index index, clique->conditional()->parents()){
|
||||
// std::cout << DefaultKeyFormatter(ordering.key(index)) << " ";
|
||||
// }
|
||||
// std::cout << ")" << std::endl;
|
||||
//
|
||||
// BOOST_FOREACH(const Clique& child, clique->children()) {
|
||||
// SymbolicPrintTree(child, ordering, indent+" ");
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//}
|
||||
/* ************************************************************************* */
|
||||
void FindFactorsWithAny(const std::set<Key>& keys, const NonlinearFactorGraph& sourceFactors, NonlinearFactorGraph& destinationFactors) {
|
||||
|
||||
BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, sourceFactors) {
|
||||
NonlinearFactor::const_iterator key = factor->begin();
|
||||
while((key != factor->end()) && (!std::binary_search(keys.begin(), keys.end(), *key))) {
|
||||
++key;
|
||||
}
|
||||
if(key != factor->end()) {
|
||||
destinationFactors.push_back(factor);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( ConcurrentBatchSmoother, update_batch )
|
||||
void FindFactorsWithOnly(const std::set<Key>& keys, const NonlinearFactorGraph& sourceFactors, NonlinearFactorGraph& destinationFactors) {
|
||||
|
||||
BOOST_FOREACH(const NonlinearFactor::shared_ptr& factor, sourceFactors) {
|
||||
NonlinearFactor::const_iterator key = factor->begin();
|
||||
while((key != factor->end()) && (std::binary_search(keys.begin(), keys.end(), *key))) {
|
||||
++key;
|
||||
}
|
||||
if(key == factor->end()) {
|
||||
destinationFactors.push_back(factor);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST( ConcurrentBatchSmoother, update_batch )
|
||||
{
|
||||
// Test the 'update' function of the ConcurrentBatchSmoother in a nonlinear environment.
|
||||
// Thus, a full L-M optimization and the ConcurrentBatchSmoother results should be identical
|
||||
|
@ -446,7 +427,7 @@ TEST_UNSAFE( ConcurrentBatchSmoother, update_batch )
|
|||
}
|
||||
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( ConcurrentBatchSmoother, update_incremental )
|
||||
TEST( ConcurrentBatchSmoother, update_incremental )
|
||||
{
|
||||
// Test the 'update' function of the ConcurrentBatchSmoother in a nonlinear environment.
|
||||
// Thus, a full L-M optimization and the ConcurrentBatchSmoother results should be identical
|
||||
|
@ -487,220 +468,223 @@ TEST_UNSAFE( ConcurrentBatchSmoother, update_incremental )
|
|||
|
||||
}
|
||||
|
||||
///* ************************************************************************* */
|
||||
//TEST_UNSAFE( ConcurrentBatchSmoother, synchronize )
|
||||
//{
|
||||
// // Test the 'synchronize' function of the ConcurrentBatchSmoother in a nonlinear environment.
|
||||
// // The smoother is operating on a known tree structure, so the factors and summarization can
|
||||
// // be predicted for testing purposes
|
||||
//
|
||||
// // Create a set of optimizer parameters
|
||||
// LevenbergMarquardtParams parameters;
|
||||
//
|
||||
// // Create a Concurrent Batch Smoother
|
||||
// ConcurrentBatchSmootherTester smoother(parameters);
|
||||
//
|
||||
// // Create containers to keep the full graph
|
||||
// Values fullTheta;
|
||||
// NonlinearFactorGraph fullGraph;
|
||||
//
|
||||
// // Create factors for times 0 - 12
|
||||
// // When eliminated with ordering (X2 X0 X1 X4 X5 X3 X6 X8 X11 X10 X7 X9 X12)
|
||||
// // ... this Bayes Tree is produced:
|
||||
// // Bayes Tree:
|
||||
// // P( X7 X9 X12 )
|
||||
// // P( X10 | X9 )
|
||||
// // P( X11 | X10 )
|
||||
// // P( X8 | X9 )
|
||||
// // P( X6 | X7 X9 )
|
||||
// // P( X5 X3 | X6 )
|
||||
// // P( X1 | X3 )
|
||||
// // P( X0 | X1 )
|
||||
// // P( X2 | X3 )
|
||||
// // P( X4 | X7 )
|
||||
// // We then produce the inputs necessary for the 'synchronize' function.
|
||||
// // The smoother branches are X4 and X6, the filter branches are X8 and X10, and the root is (X7 X9 X12)
|
||||
// CreateFactors(fullGraph, fullTheta, 0, 13);
|
||||
//
|
||||
// // Optimize the full graph
|
||||
// Values optimalTheta = BatchOptimize(fullGraph, fullTheta);
|
||||
//
|
||||
// // Re-eliminate to create the Bayes Tree
|
||||
// Ordering ordering;
|
||||
// ordering.push_back(Symbol('X', 2));
|
||||
// ordering.push_back(Symbol('X', 0));
|
||||
// ordering.push_back(Symbol('X', 1));
|
||||
// ordering.push_back(Symbol('X', 4));
|
||||
// ordering.push_back(Symbol('X', 5));
|
||||
// ordering.push_back(Symbol('X', 3));
|
||||
// ordering.push_back(Symbol('X', 6));
|
||||
// ordering.push_back(Symbol('X', 8));
|
||||
// ordering.push_back(Symbol('X', 11));
|
||||
// ordering.push_back(Symbol('X', 10));
|
||||
// ordering.push_back(Symbol('X', 7));
|
||||
// ordering.push_back(Symbol('X', 9));
|
||||
// ordering.push_back(Symbol('X', 12));
|
||||
// Values linpoint;
|
||||
// linpoint.insert(optimalTheta);
|
||||
// GaussianFactorGraph linearGraph = *fullGraph.linearize(linpoint, ordering);
|
||||
// JunctionTree<GaussianFactorGraph, ISAM2Clique> jt(linearGraph);
|
||||
// ISAM2Clique::shared_ptr root = jt.eliminate(EliminateQR);
|
||||
// BayesTree<GaussianConditional, ISAM2Clique> bayesTree;
|
||||
// bayesTree.insert(root);
|
||||
//
|
||||
// // Extract the values for the smoother keys. This consists of the branches: X4 and X6
|
||||
// // Extract the non-root values from the initial values to test the smoother optimization
|
||||
// Values smootherValues;
|
||||
// smootherValues.insert(Symbol('X', 0), fullTheta.at(Symbol('X', 0)));
|
||||
// smootherValues.insert(Symbol('X', 1), fullTheta.at(Symbol('X', 1)));
|
||||
// smootherValues.insert(Symbol('X', 2), fullTheta.at(Symbol('X', 2)));
|
||||
// smootherValues.insert(Symbol('X', 3), fullTheta.at(Symbol('X', 3)));
|
||||
// smootherValues.insert(Symbol('X', 4), fullTheta.at(Symbol('X', 4)));
|
||||
// smootherValues.insert(Symbol('X', 5), fullTheta.at(Symbol('X', 5)));
|
||||
// smootherValues.insert(Symbol('X', 6), fullTheta.at(Symbol('X', 6)));
|
||||
//
|
||||
// // Extract the optimal root values
|
||||
// Values rootValues;
|
||||
// rootValues.insert(Symbol('X', 7), optimalTheta.at(Symbol('X', 7)));
|
||||
// rootValues.insert(Symbol('X', 9), optimalTheta.at(Symbol('X', 9)));
|
||||
// rootValues.insert(Symbol('X', 12), optimalTheta.at(Symbol('X', 12)));
|
||||
//
|
||||
// // Extract the nonlinear smoother factors as any factor with a non-root smoother key
|
||||
// std::set<Key> smootherKeys;
|
||||
// smootherKeys.insert(Symbol('X', 0));
|
||||
// smootherKeys.insert(Symbol('X', 1));
|
||||
// smootherKeys.insert(Symbol('X', 2));
|
||||
// smootherKeys.insert(Symbol('X', 3));
|
||||
// smootherKeys.insert(Symbol('X', 4));
|
||||
// smootherKeys.insert(Symbol('X', 5));
|
||||
// smootherKeys.insert(Symbol('X', 6));
|
||||
// NonlinearFactorGraph smootherFactors;
|
||||
// FindFactorsWithAny(smootherKeys, fullGraph, smootherFactors);
|
||||
//
|
||||
// // Extract the filter summarized factors. This consists of the linear cached factors from
|
||||
// // the filter branches X8 and X10, as well as any nonlinear factor that involves only root keys
|
||||
// NonlinearFactorGraph filterSummarization;
|
||||
// filterSummarization.add(LinearizedJacobianFactor(boost::static_pointer_cast<JacobianFactor>(bayesTree.nodes().at(ordering.at(Symbol('X', 8)))->cachedFactor()), ordering, linpoint));
|
||||
// filterSummarization.add(LinearizedJacobianFactor(boost::static_pointer_cast<JacobianFactor>(bayesTree.nodes().at(ordering.at(Symbol('X', 10)))->cachedFactor()), ordering, linpoint));
|
||||
// std::set<Key> rootKeys;
|
||||
// rootKeys.insert(Symbol('X', 7));
|
||||
// rootKeys.insert(Symbol('X', 9));
|
||||
// rootKeys.insert(Symbol('X', 12));
|
||||
// FindFactorsWithOnly(rootKeys, fullGraph, filterSummarization);
|
||||
//
|
||||
//
|
||||
//
|
||||
// // Perform the synchronization procedure
|
||||
// NonlinearFactorGraph actualSmootherSummarization;
|
||||
// smoother.presync();
|
||||
// smoother.getSummarizedFactors(actualSmootherSummarization);
|
||||
// smoother.synchronize(smootherFactors, smootherValues, filterSummarization, rootValues);
|
||||
// smoother.postsync();
|
||||
//
|
||||
// // Verify the returned smoother values is empty in the first iteration
|
||||
// NonlinearFactorGraph expectedSmootherSummarization;
|
||||
// CHECK(assert_equal(expectedSmootherSummarization, actualSmootherSummarization, 1e-4));
|
||||
//
|
||||
//
|
||||
//
|
||||
// // Perform a full update of the smoother. Since the root values/summarized filter factors were
|
||||
// // created at the optimal values, the smoother should be identical to the batch optimization
|
||||
// smoother.update();
|
||||
// Values actualSmootherTheta = smoother.calculateEstimate();
|
||||
//
|
||||
// // Create the expected values as the optimal set
|
||||
// Values expectedSmootherTheta;
|
||||
// expectedSmootherTheta.insert(Symbol('X', 0), optimalTheta.at(Symbol('X', 0)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 1), optimalTheta.at(Symbol('X', 1)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 2), optimalTheta.at(Symbol('X', 2)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 3), optimalTheta.at(Symbol('X', 3)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 4), optimalTheta.at(Symbol('X', 4)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 5), optimalTheta.at(Symbol('X', 5)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 6), optimalTheta.at(Symbol('X', 6)));
|
||||
//
|
||||
// // Compare filter solution with full batch
|
||||
// CHECK(assert_equal(expectedSmootherTheta, actualSmootherTheta, 1e-4));
|
||||
//
|
||||
//
|
||||
//
|
||||
// // Add a loop closure factor to the smoother and re-check. Since the filter
|
||||
// // factors were created at the optimal linpoint, and since the new loop closure
|
||||
// // does not involve filter keys, the smoother should still yeild the optimal solution
|
||||
// // The new Bayes Tree is:
|
||||
// // Bayes Tree:
|
||||
// // P( X7 X9 X12 )
|
||||
// // P( X10 | X9 )
|
||||
// // P( X11 | X10 )
|
||||
// // P( X8 | X9 )
|
||||
// // P( X6 | X7 X9 )
|
||||
// // P( X4 | X6 X7 )
|
||||
// // P( X3 X5 | X4 X6 )
|
||||
// // P( X2 | X3 )
|
||||
// // P( X1 | X3 X4 )
|
||||
// // P( X0 | X1 )
|
||||
// Pose3 poseDelta = fullTheta.at<Pose3>(Symbol('X', 1)).between(fullTheta.at<Pose3>(Symbol('X', 4)));
|
||||
// NonlinearFactor::shared_ptr loopClosure = NonlinearFactor::shared_ptr(new BetweenFactor<Pose3>(Symbol('X', 1), Symbol('X', 4), poseDelta, noiseOdometery));
|
||||
// fullGraph.push_back(loopClosure);
|
||||
// optimalTheta = BatchOptimize(fullGraph, fullTheta, rootValues);
|
||||
//
|
||||
// // Recreate the Bayes Tree
|
||||
// linpoint.clear();
|
||||
// linpoint.insert(optimalTheta);
|
||||
// linpoint.update(rootValues);
|
||||
// linearGraph = *fullGraph.linearize(linpoint, ordering);
|
||||
// jt = JunctionTree<GaussianFactorGraph, ISAM2Clique>(linearGraph);
|
||||
// root = jt.eliminate(EliminateQR);
|
||||
// bayesTree = BayesTree<GaussianConditional, ISAM2Clique>();
|
||||
// bayesTree.insert(root);
|
||||
//
|
||||
// // Add the loop closure to the smoother
|
||||
// NonlinearFactorGraph newFactors;
|
||||
// newFactors.push_back(loopClosure);
|
||||
// smoother.update(newFactors);
|
||||
// actualSmootherTheta = smoother.calculateEstimate();
|
||||
//
|
||||
// // Create the expected values as the optimal set
|
||||
// expectedSmootherTheta.clear();
|
||||
// expectedSmootherTheta.insert(Symbol('X', 0), optimalTheta.at(Symbol('X', 0)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 1), optimalTheta.at(Symbol('X', 1)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 2), optimalTheta.at(Symbol('X', 2)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 3), optimalTheta.at(Symbol('X', 3)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 4), optimalTheta.at(Symbol('X', 4)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 5), optimalTheta.at(Symbol('X', 5)));
|
||||
// expectedSmootherTheta.insert(Symbol('X', 6), optimalTheta.at(Symbol('X', 6)));
|
||||
//
|
||||
// // Compare filter solution with full batch
|
||||
// // TODO: Check This
|
||||
//// CHECK(assert_equal(expectedSmootherTheta, actualSmootherTheta, 1e-4));
|
||||
//
|
||||
//
|
||||
//
|
||||
// // Now perform a second synchronization to test the smoother-calculated summarization
|
||||
// actualSmootherSummarization.resize(0);
|
||||
// smootherFactors.resize(0);
|
||||
// smootherValues.clear();
|
||||
// smoother.presync();
|
||||
// smoother.getSummarizedFactors(actualSmootherSummarization);
|
||||
// smoother.synchronize(smootherFactors, smootherValues, filterSummarization, rootValues);
|
||||
// smoother.postsync();
|
||||
//
|
||||
// // Extract the expected smoother summarization from the Bayes Tree
|
||||
// // The smoother branches after the addition of the loop closure is only X6
|
||||
// expectedSmootherSummarization.resize(0);
|
||||
// JacobianFactor::shared_ptr jf = boost::dynamic_pointer_cast<JacobianFactor>(bayesTree.nodes().at(ordering.at(Symbol('X', 6)))->cachedFactor());
|
||||
// LinearizedJacobianFactor::shared_ptr ljf(new LinearizedJacobianFactor(jf, ordering, linpoint));
|
||||
// expectedSmootherSummarization.push_back(ljf);
|
||||
//
|
||||
// // Compare smoother factors with the expected factors by computing the hessian information matrix
|
||||
// // TODO: Check This
|
||||
//// CHECK(hessian_equal(expectedSmootherSummarization, actualSmootherSummarization, linpoint, 1e-4));
|
||||
//
|
||||
//
|
||||
//
|
||||
// // TODO: Modify the second synchronization so that the filter sends an additional set of factors.
|
||||
// // I'm not sure what additional code this will exercise, but just for good measure.
|
||||
//
|
||||
//}
|
||||
/* ************************************************************************* */
|
||||
TEST_UNSAFE( ConcurrentBatchSmoother, synchronize )
|
||||
{
|
||||
// Test the 'synchronize' function of the ConcurrentBatchSmoother in a nonlinear environment.
|
||||
// The smoother is operating on a known tree structure, so the factors and summarization can
|
||||
// be predicted for testing purposes
|
||||
|
||||
// Create a set of optimizer parameters
|
||||
LevenbergMarquardtParams parameters;
|
||||
|
||||
// Create a Concurrent Batch Smoother
|
||||
ConcurrentBatchSmootherTester smoother(parameters);
|
||||
|
||||
// Create containers to keep the full graph
|
||||
Values fullTheta;
|
||||
NonlinearFactorGraph fullGraph;
|
||||
|
||||
// Create factors for times 0 - 12
|
||||
// When eliminated with ordering (X2 X0 X1 X4 X5 X3 X6 X8 X11 X10 X7 X9 X12)
|
||||
// ... this Bayes Tree is produced:
|
||||
// Bayes Tree:
|
||||
// P( X7 X9 X12 )
|
||||
// P( X10 | X9 )
|
||||
// P( X11 | X10 )
|
||||
// P( X8 | X9 )
|
||||
// P( X6 | X7 X9 )
|
||||
// P( X5 X3 | X6 )
|
||||
// P( X1 | X3 )
|
||||
// P( X0 | X1 )
|
||||
// P( X2 | X3 )
|
||||
// P( X4 | X7 )
|
||||
// We then produce the inputs necessary for the 'synchronize' function.
|
||||
// The smoother branches are X4 and X6, the filter branches are X8 and X10, and the root is (X7 X9 X12)
|
||||
CreateFactors(fullGraph, fullTheta, 0, 13);
|
||||
|
||||
// Optimize the full graph
|
||||
Values optimalTheta = BatchOptimize(fullGraph, fullTheta);
|
||||
|
||||
// Re-eliminate to create the Bayes Tree
|
||||
Ordering ordering;
|
||||
ordering.push_back(Symbol('X', 2));
|
||||
ordering.push_back(Symbol('X', 0));
|
||||
ordering.push_back(Symbol('X', 1));
|
||||
ordering.push_back(Symbol('X', 4));
|
||||
ordering.push_back(Symbol('X', 5));
|
||||
ordering.push_back(Symbol('X', 3));
|
||||
ordering.push_back(Symbol('X', 6));
|
||||
ordering.push_back(Symbol('X', 8));
|
||||
ordering.push_back(Symbol('X', 11));
|
||||
ordering.push_back(Symbol('X', 10));
|
||||
ordering.push_back(Symbol('X', 7));
|
||||
ordering.push_back(Symbol('X', 9));
|
||||
ordering.push_back(Symbol('X', 12));
|
||||
Values linpoint;
|
||||
linpoint.insert(optimalTheta);
|
||||
GaussianFactorGraph linearGraph = *fullGraph.linearize(linpoint, ordering);
|
||||
JunctionTree<GaussianFactorGraph, ISAM2Clique> jt(linearGraph);
|
||||
ISAM2Clique::shared_ptr root = jt.eliminate(EliminateQR);
|
||||
BayesTree<GaussianConditional, ISAM2Clique> bayesTree;
|
||||
bayesTree.insert(root);
|
||||
|
||||
// Extract the values for the smoother keys. This consists of the branches: X4 and X6
|
||||
// Extract the non-root values from the initial values to test the smoother optimization
|
||||
Values smootherValues;
|
||||
smootherValues.insert(Symbol('X', 0), fullTheta.at(Symbol('X', 0)));
|
||||
smootherValues.insert(Symbol('X', 1), fullTheta.at(Symbol('X', 1)));
|
||||
smootherValues.insert(Symbol('X', 2), fullTheta.at(Symbol('X', 2)));
|
||||
smootherValues.insert(Symbol('X', 3), fullTheta.at(Symbol('X', 3)));
|
||||
smootherValues.insert(Symbol('X', 4), fullTheta.at(Symbol('X', 4)));
|
||||
smootherValues.insert(Symbol('X', 5), fullTheta.at(Symbol('X', 5)));
|
||||
smootherValues.insert(Symbol('X', 6), fullTheta.at(Symbol('X', 6)));
|
||||
|
||||
// Extract the optimal root values
|
||||
Values rootValues;
|
||||
rootValues.insert(Symbol('X', 7), optimalTheta.at(Symbol('X', 7)));
|
||||
rootValues.insert(Symbol('X', 9), optimalTheta.at(Symbol('X', 9)));
|
||||
rootValues.insert(Symbol('X', 12), optimalTheta.at(Symbol('X', 12)));
|
||||
|
||||
// Extract the nonlinear smoother factors as any factor with a non-root smoother key
|
||||
std::set<Key> smootherKeys;
|
||||
smootherKeys.insert(Symbol('X', 0));
|
||||
smootherKeys.insert(Symbol('X', 1));
|
||||
smootherKeys.insert(Symbol('X', 2));
|
||||
smootherKeys.insert(Symbol('X', 3));
|
||||
smootherKeys.insert(Symbol('X', 4));
|
||||
smootherKeys.insert(Symbol('X', 5));
|
||||
smootherKeys.insert(Symbol('X', 6));
|
||||
NonlinearFactorGraph smootherFactors;
|
||||
FindFactorsWithAny(smootherKeys, fullGraph, smootherFactors);
|
||||
|
||||
// Extract the filter summarized factors. This consists of the linear cached factors from
|
||||
// the filter branches X8 and X10, as well as any nonlinear factor that involves only root keys
|
||||
NonlinearFactorGraph filterSummarization;
|
||||
filterSummarization.add(LinearContainerFactor(bayesTree.nodes().at(ordering.at(Symbol('X', 8)))->cachedFactor(), ordering, linpoint));
|
||||
filterSummarization.add(LinearContainerFactor(bayesTree.nodes().at(ordering.at(Symbol('X', 10)))->cachedFactor(), ordering, linpoint));
|
||||
std::set<Key> rootKeys;
|
||||
rootKeys.insert(Symbol('X', 7));
|
||||
rootKeys.insert(Symbol('X', 9));
|
||||
rootKeys.insert(Symbol('X', 12));
|
||||
FindFactorsWithOnly(rootKeys, fullGraph, filterSummarization);
|
||||
|
||||
|
||||
|
||||
// Perform the synchronization procedure
|
||||
NonlinearFactorGraph actualSmootherSummarization;
|
||||
smoother.presync();
|
||||
smoother.getSummarizedFactors(actualSmootherSummarization);
|
||||
smoother.synchronize(smootherFactors, smootherValues, filterSummarization, rootValues);
|
||||
smoother.postsync();
|
||||
|
||||
// Verify the returned smoother summarization is empty in the first iteration
|
||||
NonlinearFactorGraph expectedSmootherSummarization;
|
||||
CHECK(assert_equal(expectedSmootherSummarization, actualSmootherSummarization, 1e-4));
|
||||
|
||||
|
||||
|
||||
// Perform a full update of the smoother. Since the root values/summarized filter factors were
|
||||
// created at the optimal values, the smoother should be identical to the batch optimization
|
||||
smoother.update();
|
||||
Values actualSmootherTheta = smoother.calculateEstimate();
|
||||
|
||||
// Create the expected values from the optimal set
|
||||
Values expectedSmootherTheta;
|
||||
expectedSmootherTheta.insert(Symbol('X', 0), optimalTheta.at(Symbol('X', 0)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 1), optimalTheta.at(Symbol('X', 1)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 2), optimalTheta.at(Symbol('X', 2)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 3), optimalTheta.at(Symbol('X', 3)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 4), optimalTheta.at(Symbol('X', 4)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 5), optimalTheta.at(Symbol('X', 5)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 6), optimalTheta.at(Symbol('X', 6)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 7), optimalTheta.at(Symbol('X', 7)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 9), optimalTheta.at(Symbol('X', 9)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 12), optimalTheta.at(Symbol('X', 12)));
|
||||
|
||||
// Compare filter solution with full batch
|
||||
CHECK(assert_equal(expectedSmootherTheta, actualSmootherTheta, 1e-4));
|
||||
|
||||
|
||||
|
||||
// Add a loop closure factor to the smoother and re-check. Since the filter
|
||||
// factors were created at the optimal linpoint, and since the new loop closure
|
||||
// does not involve filter keys, the smoother should still yield the optimal solution
|
||||
// The new Bayes Tree is:
|
||||
// Bayes Tree:
|
||||
// P( X7 X9 X12 )
|
||||
// P( X10 | X9 )
|
||||
// P( X11 | X10 )
|
||||
// P( X8 | X9 )
|
||||
// P( X6 | X7 X9 )
|
||||
// P( X4 | X6 X7 )
|
||||
// P( X3 X5 | X4 X6 )
|
||||
// P( X2 | X3 )
|
||||
// P( X1 | X3 X4 )
|
||||
// P( X0 | X1 )
|
||||
Pose3 poseDelta = fullTheta.at<Pose3>(Symbol('X', 1)).between(fullTheta.at<Pose3>(Symbol('X', 4)));
|
||||
NonlinearFactor::shared_ptr loopClosure = NonlinearFactor::shared_ptr(new BetweenFactor<Pose3>(Symbol('X', 1), Symbol('X', 4), poseDelta, noiseOdometery));
|
||||
fullGraph.push_back(loopClosure);
|
||||
optimalTheta = BatchOptimize(fullGraph, fullTheta, rootValues);
|
||||
|
||||
// Recreate the Bayes Tree
|
||||
linpoint.clear();
|
||||
linpoint.insert(optimalTheta);
|
||||
linpoint.update(rootValues);
|
||||
linearGraph = *fullGraph.linearize(linpoint, ordering);
|
||||
jt = JunctionTree<GaussianFactorGraph, ISAM2Clique>(linearGraph);
|
||||
root = jt.eliminate(EliminateQR);
|
||||
bayesTree = BayesTree<GaussianConditional, ISAM2Clique>();
|
||||
bayesTree.insert(root);
|
||||
|
||||
// Add the loop closure to the smoother
|
||||
NonlinearFactorGraph newFactors;
|
||||
newFactors.push_back(loopClosure);
|
||||
smoother.update(newFactors);
|
||||
actualSmootherTheta = smoother.calculateEstimate();
|
||||
|
||||
// Create the expected values as the optimal set
|
||||
expectedSmootherTheta.clear();
|
||||
expectedSmootherTheta.insert(Symbol('X', 0), optimalTheta.at(Symbol('X', 0)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 1), optimalTheta.at(Symbol('X', 1)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 2), optimalTheta.at(Symbol('X', 2)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 3), optimalTheta.at(Symbol('X', 3)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 4), optimalTheta.at(Symbol('X', 4)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 5), optimalTheta.at(Symbol('X', 5)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 6), optimalTheta.at(Symbol('X', 6)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 7), optimalTheta.at(Symbol('X', 7)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 9), optimalTheta.at(Symbol('X', 9)));
|
||||
expectedSmootherTheta.insert(Symbol('X', 12), optimalTheta.at(Symbol('X', 12)));
|
||||
|
||||
// Compare filter solution with full batch
|
||||
CHECK(assert_equal(expectedSmootherTheta, actualSmootherTheta, 1e-4));
|
||||
|
||||
|
||||
|
||||
// Now perform a second synchronization to test the smoother-calculated summarization
|
||||
actualSmootherSummarization.resize(0);
|
||||
smootherFactors.resize(0);
|
||||
smootherValues.clear();
|
||||
smoother.presync();
|
||||
smoother.getSummarizedFactors(actualSmootherSummarization);
|
||||
smoother.synchronize(smootherFactors, smootherValues, filterSummarization, rootValues);
|
||||
smoother.postsync();
|
||||
|
||||
// Extract the expected smoother summarization from the Bayes Tree
|
||||
// The smoother branches after the addition of the loop closure is only X6
|
||||
expectedSmootherSummarization.resize(0);
|
||||
LinearContainerFactor::shared_ptr factor(new LinearContainerFactor(bayesTree.nodes().at(ordering.at(Symbol('X', 6)))->cachedFactor(), ordering, linpoint));
|
||||
expectedSmootherSummarization.push_back(factor);
|
||||
|
||||
// Compare smoother factors with the expected factors by computing the hessian information matrix
|
||||
CHECK(hessian_equal(expectedSmootherSummarization, actualSmootherSummarization, linpoint, 1e-4));
|
||||
|
||||
|
||||
|
||||
// TODO: Modify the second synchronization so that the filter sends an additional set of factors.
|
||||
// I'm not sure what additional code this will exercise, but just for good measure.
|
||||
|
||||
}

/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}