ConcurrentFilterAndSmoother updated and working with the unordered version of GTSAM

parent 4efdf497cd
commit 4dfa2506ab
@@ -18,13 +18,8 @@ add_custom_target(check.unstable COMMAND ${CMAKE_CTEST_COMMAND} --output-on-fail
 # Add the full name to this list, as in the following example
 # Sources to remove from builds
 set (excluded_sources # "")
-    "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/BatchFixedLagSmoother.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchFilter.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentBatchSmoother.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentFilteringAndSmoothing.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentIncrementalFilter.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/ConcurrentIncrementalSmoother.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/FixedLagSmoother.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/nonlinear/IncrementalFixedLagSmoother.cpp"

     "${CMAKE_CURRENT_SOURCE_DIR}/slam/serialization.cpp"
@@ -55,17 +55,12 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(const NonlinearFactorGrap
   // Add the new variables to theta
   theta_.insert(newTheta);
-  // Add new variables to the end of the ordering
-  std::vector<size_t> dims;
-  dims.reserve(newTheta.size());
-  BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
-    ordering_.push_back(key_value.key);
-    dims.push_back(key_value.value.dim());
-  }
   // Augment Delta
-  delta_.append(dims);
-  for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
-    delta_[i].setZero();
-  }
+  delta_.insert(newTheta.zeroVectors());

   // Add the new factors to the graph, updating the variable index
   insertFactors(newFactors);
   gttoc(augment_system);
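The replacement above leans on the unordered API's key-indexed VectorValues: because delta_ is now a map from Key to vector rather than a slot-indexed array, new variables can be initialized with a single insert of zero vectors instead of appending dimensions and zeroing slots by index. A minimal standalone sketch of the idiom (the key and value here are illustrative, not from the commit):

    #include <gtsam/geometry/Point2.h>
    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    int main() {
      Values newTheta;
      newTheta.insert(1, Point2(1.0, 2.0));   // key 1, dimension 2

      VectorValues delta;
      delta.insert(newTheta.zeroVectors());   // one zero vector per key, sized by dim
      return 0;
    }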
@@ -171,22 +166,11 @@ void BatchFixedLagSmoother::eraseKeys(const std::set<Key>& keys) {

   eraseKeyTimestampMap(keys);

-  // Permute the ordering such that the removed keys are at the end.
-  // This is a prerequisite for removing them from several structures
-  std::vector<Index> toBack;
-  BOOST_FOREACH(Key key, keys) {
-    toBack.push_back(ordering_.at(key));
-  }
-  Permutation forwardPermutation = Permutation::PushToBack(toBack, ordering_.size());
-  ordering_.permuteInPlace(forwardPermutation);
-  delta_.permuteInPlace(forwardPermutation);
-
   // Remove marginalized keys from the ordering and delta
-  for(size_t i = 0; i < keys.size(); ++i) {
-    ordering_.pop_back();
-    delta_.pop_back();
+  BOOST_FOREACH(Key key, keys) {
+    ordering_.erase(std::find(ordering_.begin(), ordering_.end(), key));
+    delta_.erase(key);
   }

 }

 /* ************************************************************************* */
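With Ordering now a plain sequence of keys and VectorValues a key-indexed container, deleting a variable no longer needs the push-to-back permutation machinery; both structures support direct erasure. A sketch of the pattern as a free function (a hypothetical helper, not part of the commit):

    #include <algorithm>
    #include <gtsam/inference/Ordering.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    // Remove one variable from an ordering and its associated delta vector.
    void eraseVariable(Ordering& ordering, VectorValues& delta, Key key) {
      // Ordering derives from a std::vector of keys, so std::find works directly.
      ordering.erase(std::find(ordering.begin(), ordering.end(), key));
      // VectorValues is keyed by Key; no permutation bookkeeping is needed.
      delta.erase(key);
    }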
@@ -206,29 +190,8 @@ void BatchFixedLagSmoother::reorder(const std::set<Key>& marginalizeKeys) {
     std::cout << std::endl;
   }

-  // Calculate a variable index
-  VariableIndex variableIndex(*factors_.symbolic(ordering_), ordering_.size());
-
-  // COLAMD groups will be used to place marginalize keys in Group 0, and everything else in Group 1
-  int group0 = 0;
-  int group1 = marginalizeKeys.size() > 0 ? 1 : 0;
-
-  // Initialize all variables to group1
-  std::vector<int> cmember(variableIndex.size(), group1);
-
-  // Set all of the marginalizeKeys to Group0
-  if(marginalizeKeys.size() > 0) {
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      cmember[ordering_.at(key)] = group0;
-    }
-  }
-
-  // Generate the permutation
-  Permutation forwardPermutation = *inference::PermutationCOLAMD_(variableIndex, cmember);
-
-  // Permute the ordering, variable index, and deltas
-  ordering_.permuteInPlace(forwardPermutation);
-  delta_.permuteInPlace(forwardPermutation);
+  ordering_ = Ordering::COLAMDConstrainedFirst(factors_, std::vector<Key>(marginalizeKeys.begin(), marginalizeKeys.end()));

   if(debug) {
     ordering_.print("New Ordering: ");
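The whole group/cmember/permutation construction collapses into one call: COLAMDConstrainedFirst computes a fill-reducing ordering while forcing the marginalization candidates to the front, so they are eliminated first. A sketch of the call in isolation (assuming the GTSAM 3-era spelling used by this commit; later releases rename it ColamdConstrainedFirst):

    #include <set>
    #include <vector>
    #include <gtsam/inference/Ordering.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>

    using namespace gtsam;

    Ordering marginalizationOrdering(const NonlinearFactorGraph& factors,
                                     const std::set<Key>& marginalizeKeys) {
      // Keys listed here are constrained to appear first in the ordering.
      std::vector<Key> first(marginalizeKeys.begin(), marginalizeKeys.end());
      return Ordering::COLAMDConstrainedFirst(factors, first);
    }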
@@ -264,7 +227,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
   double errorTol = parameters_.errorTol;

   // Create a Values that holds the current evaluation point
-  Values evalpoint = theta_.retract(delta_, ordering_);
+  Values evalpoint = theta_.retract(delta_);
   result.error = factors_.error(evalpoint);

   // check if we're already close enough
@@ -288,7 +251,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
     gttic(optimizer_iteration);
     {
       // Linearize graph around the linearization point
-      GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_, ordering_);
+      GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_);

       // Keep increasing lambda until we make make progress
       while(true) {
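Both changes are the same API shift: retract and linearize now match variables by Key instead of by Ordering slot, so the Ordering argument disappears. A small standalone sketch (illustrative key and values, not from the commit):

    #include <gtsam/geometry/Point2.h>
    #include <gtsam/nonlinear/Values.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    int main() {
      Values theta;
      theta.insert(1, Point2(0.0, 0.0));

      VectorValues delta;
      delta.insert(1, Vector2(0.5, -0.5));

      // Keys line up directly; previously retract needed the Ordering that
      // mapped integer slots back to Keys.
      Values evalpoint = theta.retract(delta);
      return 0;
    }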
@@ -302,12 +265,12 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
         {
           // for each of the variables, add a prior at the current solution
           double sigma = 1.0 / std::sqrt(lambda);
-          for(size_t j=0; j<delta_.size(); ++j) {
-            size_t dim = delta_[j].size();
-            Matrix A = eye(dim);
-            Vector b = delta_[j];
+          BOOST_FOREACH(const VectorValues::KeyValuePair& key_value, delta_) {
+            size_t dim = key_value.second.size();
+            Matrix A = Matrix::Identity(dim,dim);
+            Vector b = key_value.second;
             SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
-            GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+            GaussianFactor::shared_ptr prior(new JacobianFactor(key_value.first, A, b, model));
             dampedFactorGraph.push_back(prior);
           }
         }
@@ -316,9 +279,9 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {

           gttic(solve);
           // Solve Damped Gaussian Factor Graph
-          newDelta = GaussianJunctionTree(dampedFactorGraph).optimize(parameters_.getEliminationFunction());
+          newDelta = dampedFactorGraph.optimize(ordering_, parameters_.getEliminationFunction());
           // update the evalpoint with the new delta
-          evalpoint = theta_.retract(newDelta, ordering_);
+          evalpoint = theta_.retract(newDelta);
           gttoc(solve);

           // Evaluate the new error
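The Levenberg-Marquardt damping term is built the same way on both sides of the change; only the indexing differs. Each variable gets a unary Jacobian prior pulling it toward the current delta, and the prior is now keyed by the variable's Key. A self-contained sketch of that step (a hypothetical free function; the smoother inlines this logic):

    #include <cmath>
    #include <boost/foreach.hpp>
    #include <boost/make_shared.hpp>
    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <gtsam/linear/JacobianFactor.h>
    #include <gtsam/linear/NoiseModel.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    // Add one damping prior per variable, keyed by Key rather than by slot j.
    void addDampingPriors(GaussianFactorGraph& damped,
                          const VectorValues& delta, double lambda) {
      double sigma = 1.0 / std::sqrt(lambda);
      BOOST_FOREACH(const VectorValues::KeyValuePair& key_value, delta) {
        size_t dim = key_value.second.size();
        Matrix A = Matrix::Identity(dim, dim);
        Vector b = key_value.second;
        SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
        damped.push_back(boost::make_shared<JacobianFactor>(key_value.first, A, b, model));
      }
    }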
@@ -343,8 +306,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
           if(enforceConsistency_ && (linearKeys_.size() > 0)) {
             theta_.update(linearKeys_);
             BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, linearKeys_) {
-              Index index = ordering_.at(key_value.key);
-              delta_.at(index) = newDelta.at(index);
+              delta_.at(key_value.key) = newDelta.at(key_value.key);
             }
           }
           // Decrease lambda for next time
@@ -406,9 +368,9 @@ void BatchFixedLagSmoother::marginalize(const std::set<Key>& marginalizeKeys) {

   // Identify all of the factors involving any marginalized variable. These must be removed.
   std::set<size_t> removedFactorSlots;
-  VariableIndex variableIndex(*factors_.symbolic(ordering_), theta_.size());
+  VariableIndex variableIndex(factors_);
   BOOST_FOREACH(Key key, marginalizeKeys) {
-    const FastList<size_t>& slots = variableIndex[ordering_.at(key)];
+    const FastList<size_t>& slots = variableIndex[key];
     BOOST_FOREACH(size_t slot, slots) {
       removedFactorSlots.insert(slot);
     }
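A VariableIndex can now be built straight from the nonlinear factor graph and queried by Key, with no symbolic()/Ordering detour. A sketch of the lookup as a free function (a hypothetical helper illustrating the pattern above):

    #include <set>
    #include <boost/foreach.hpp>
    #include <gtsam/inference/VariableIndex.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>

    using namespace gtsam;

    // Collect the slots of every factor that touches any of the given keys.
    std::set<size_t> factorsTouching(const NonlinearFactorGraph& factors,
                                     const std::set<Key>& keys) {
      VariableIndex variableIndex(factors);
      std::set<size_t> slots;
      BOOST_FOREACH(Key key, keys) {
        BOOST_FOREACH(size_t slot, variableIndex[key]) {
          slots.insert(slot);
        }
      }
      return slots;
    }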
@@ -483,10 +445,10 @@ void BatchFixedLagSmoother::PrintSymbolicFactor(const NonlinearFactor::shared_pt
 }

 /* ************************************************************************* */
-void BatchFixedLagSmoother::PrintSymbolicFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering) {
+void BatchFixedLagSmoother::PrintSymbolicFactor(const GaussianFactor::shared_ptr& factor) {
   std::cout << "f(";
-  BOOST_FOREACH(Index index, factor->keys()) {
-    std::cout << " " << index << "[" << gtsam::DefaultKeyFormatter(ordering.key(index)) << "]";
+  BOOST_FOREACH(Key key, factor->keys()) {
+    std::cout << " " << gtsam::DefaultKeyFormatter(key);
   }
   std::cout << " )" << std::endl;
 }
@@ -500,102 +462,14 @@ void BatchFixedLagSmoother::PrintSymbolicGraph(const NonlinearFactorGraph& graph
 }

 /* ************************************************************************* */
-void BatchFixedLagSmoother::PrintSymbolicGraph(const GaussianFactorGraph& graph, const Ordering& ordering, const std::string& label) {
+void BatchFixedLagSmoother::PrintSymbolicGraph(const GaussianFactorGraph& graph, const std::string& label) {
   std::cout << label << std::endl;
   BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, graph) {
-    PrintSymbolicFactor(factor, ordering);
+    PrintSymbolicFactor(factor);
   }
 }

-/* ************************************************************************* */
-std::vector<Index> BatchFixedLagSmoother::EliminationForest::ComputeParents(const VariableIndex& structure) {
-  // Number of factors and variables
-  const size_t m = structure.nFactors();
-  const size_t n = structure.size();
-
-  static const Index none = std::numeric_limits<Index>::max();
-
-  // Allocate result parent vector and vector of last factor columns
-  std::vector<Index> parents(n, none);
-  std::vector<Index> prevCol(m, none);
-
-  // for column j \in 1 to n do
-  for (Index j = 0; j < n; j++) {
-    // for row i \in Struct[A*j] do
-    BOOST_FOREACH(const size_t i, structure[j]) {
-      if (prevCol[i] != none) {
-        Index k = prevCol[i];
-        // find root r of the current tree that contains k
-        Index r = k;
-        while (parents[r] != none)
-          r = parents[r];
-        if (r != j) parents[r] = j;
-      }
-      prevCol[i] = j;
-    }
-  }
-
-  return parents;
-}
-
-/* ************************************************************************* */
-std::vector<BatchFixedLagSmoother::EliminationForest::shared_ptr> BatchFixedLagSmoother::EliminationForest::Create(const GaussianFactorGraph& factorGraph, const VariableIndex& structure) {
-  // Compute the tree structure
-  std::vector<Index> parents(ComputeParents(structure));
-
-  // Number of variables
-  const size_t n = structure.size();
-
-  static const Index none = std::numeric_limits<Index>::max();
-
-  // Create tree structure
-  std::vector<shared_ptr> trees(n);
-  for (Index k = 1; k <= n; k++) {
-    Index j = n - k; // Start at the last variable and loop down to 0
-    trees[j].reset(new EliminationForest(j)); // Create a new node on this variable
-    if (parents[j] != none) // If this node has a parent, add it to the parent's children
-      trees[parents[j]]->add(trees[j]);
-  }
-
-  // Hang factors in right places
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factorGraph) {
-    if(factor && factor->size() > 0) {
-      Index j = *std::min_element(factor->begin(), factor->end());
-      if(j < structure.size())
-        trees[j]->add(factor);
-    }
-  }
-
-  return trees;
-}
-
-/* ************************************************************************* */
-GaussianFactor::shared_ptr BatchFixedLagSmoother::EliminationForest::eliminateRecursive(GaussianFactorGraph::Eliminate function) {
-
-  // Create the list of factors to be eliminated, initially empty, and reserve space
-  GaussianFactorGraph factors;
-  factors.reserve(this->factors_.size() + this->subTrees_.size());
-
-  // Add all factors associated with the current node
-  factors.push_back(this->factors_.begin(), this->factors_.end());
-
-  // for all subtrees, eliminate into Bayes net and a separator factor, added to [factors]
-  BOOST_FOREACH(const shared_ptr& child, subTrees_)
-    factors.push_back(child->eliminateRecursive(function));
-
-  // Combine all factors (from this node and from subtrees) into a joint factor
-  GaussianFactorGraph::EliminationResult eliminated(function(factors, 1));
-
-  return eliminated.second;
-}
-
-/* ************************************************************************* */
-void BatchFixedLagSmoother::EliminationForest::removeChildrenIndices(std::set<Index>& indices, const BatchFixedLagSmoother::EliminationForest::shared_ptr& tree) {
-  BOOST_FOREACH(const EliminationForest::shared_ptr& child, tree->children()) {
-    indices.erase(child->key());
-    removeChildrenIndices(indices, child);
-  }
-}
-
 /* ************************************************************************* */
 NonlinearFactorGraph BatchFixedLagSmoother::calculateMarginalFactors(const NonlinearFactorGraph& graph, const Values& theta,
@@ -621,68 +495,20 @@ NonlinearFactorGraph BatchFixedLagSmoother::calculateMarginalFactors(const Nonli
     if(debug) std::cout << "BatchFixedLagSmoother::calculateMarginalFactors FINISH" << std::endl;
     return graph;
   } else {
-    // Create a subset of theta that only contains the required keys
-    Values values;
-    BOOST_FOREACH(Key key, allKeys) {
-      values.insert(key, theta.at(key));
-    }
-
-    // Calculate the ordering: [Others Root]
-    std::map<Key, int> constraints;
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      constraints[key] = 0;
-    }
-    BOOST_FOREACH(Key key, remainingKeys) {
-      constraints[key] = 1;
-    }
-    Ordering ordering = *graph.orderingCOLAMDConstrained(values, constraints);
-
     // Create the linear factor graph
-    GaussianFactorGraph linearFactorGraph = *graph.linearize(values, ordering);
-
-    // Construct a variable index
-    VariableIndex variableIndex(linearFactorGraph, ordering.size());
-
-    // Construct an elimination tree to perform sparse elimination
-    std::vector<EliminationForest::shared_ptr> forest( BatchFixedLagSmoother::EliminationForest::Create(linearFactorGraph, variableIndex) );
-
-    // This is a forest. Only the top-most node/index of each tree needs to be eliminated; all of the children will be eliminated automatically
-    // Find the subset of nodes/keys that must be eliminated
-    std::set<Index> indicesToEliminate;
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      indicesToEliminate.insert(ordering.at(key));
-    }
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      EliminationForest::removeChildrenIndices(indicesToEliminate, forest.at(ordering.at(key)));
-    }
-
-    if(debug) PrintKeySet(indicesToEliminate, "BatchFixedLagSmoother::calculateMarginalFactors Indices To Eliminate: ");
-
-    // Eliminate each top-most key, returning a Gaussian Factor on some of the remaining variables
-    // Convert the marginal factors into Linear Container Factors
+    GaussianFactorGraph linearFactorGraph = *graph.linearize(theta);
+    // .first is the eliminated Bayes tree, while .second is the remaining factor graph
+    GaussianFactorGraph marginalLinearFactors = *linearFactorGraph.eliminatePartialMultifrontal(std::vector<Key>(marginalizeKeys.begin(), marginalizeKeys.end())).second;

+    // Wrap in nonlinear container factors
     NonlinearFactorGraph marginalFactors;
-    BOOST_FOREACH(Index index, indicesToEliminate) {
-      GaussianFactor::shared_ptr gaussianFactor = forest.at(index)->eliminateRecursive(eliminateFunction);
-      if(gaussianFactor->size() > 0) {
-        LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering, values));
-        marginalFactors.push_back(marginalFactor);
-        if(debug) {
-          std::cout << "BatchFixedLagSmoother::calculateMarginalFactors Marginal Factor: ";
-          PrintSymbolicFactor(marginalFactor);
-        }
-      }
-    }
-
-    // Also add any remaining factors that were unaffected by marginalizing out the selected variables.
-    // These are part of the marginal on the remaining variables as well.
-    BOOST_FOREACH(Key key, remainingKeys) {
-      BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, forest.at(ordering.at(key))->factors()) {
-        LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering, values));
-        marginalFactors.push_back(marginalFactor);
-        if(debug) {
-          std::cout << "BatchFixedLagSmoother::calculateMarginalFactors Remaining Factor: ";
-          PrintSymbolicFactor(marginalFactor);
-        }
-      }
-    }
+    marginalFactors.reserve(marginalLinearFactors.size());
+    BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, marginalLinearFactors) {
+      marginalFactors += boost::make_shared<LinearContainerFactor>(gaussianFactor, theta);
+      if(debug) {
+        std::cout << "BatchFixedLagSmoother::calculateMarginalFactors Marginal Factor: ";
+        PrintSymbolicFactor(marginalFactors.back());
+      }
+    }
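The hand-rolled elimination forest is gone: partial multifrontal elimination is now a library call that splits the graph into an eliminated Bayes tree (.first) and the induced marginal factors on everything else (.second). A condensed sketch of the new marginalization path (a hypothetical free function; the real code also handles debug printing and the nothing-to-marginalize case):

    #include <vector>
    #include <boost/foreach.hpp>
    #include <boost/make_shared.hpp>
    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <gtsam/nonlinear/LinearContainerFactor.h>
    #include <gtsam/nonlinear/NonlinearFactorGraph.h>
    #include <gtsam/nonlinear/Values.h>

    using namespace gtsam;

    NonlinearFactorGraph marginalizeOut(const NonlinearFactorGraph& graph,
                                        const Values& theta,
                                        const std::vector<Key>& marginalizeKeys) {
      GaussianFactorGraph linear = *graph.linearize(theta);
      // Eliminate only the given keys; keep the factors left on the rest.
      GaussianFactorGraph marginals =
          *linear.eliminatePartialMultifrontal(marginalizeKeys).second;

      // Re-wrap each Gaussian marginal as a nonlinear factor anchored at theta.
      NonlinearFactorGraph result;
      BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, marginals) {
        result += boost::make_shared<LinearContainerFactor>(factor, theta);
      }
      return result;
    }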
@@ -154,57 +154,6 @@ protected:
   /** Marginalize out selected variables */
   void marginalize(const std::set<Key>& marginalizableKeys);

-  // A custom elimination tree that supports forests and partial elimination
-  class EliminationForest {
-  public:
-    typedef boost::shared_ptr<EliminationForest> shared_ptr; ///< Shared pointer to this class
-
-  private:
-    typedef FastList<GaussianFactor::shared_ptr> Factors;
-    typedef FastList<shared_ptr> SubTrees;
-    typedef std::vector<GaussianConditional::shared_ptr> Conditionals;
-
-    Index key_; ///< index associated with root
-    Factors factors_; ///< factors associated with root
-    SubTrees subTrees_; ///< sub-trees
-
-    /** default constructor, private, as you should use Create below */
-    EliminationForest(Index key = 0) : key_(key) {}
-
-    /**
-     * Static internal function to build a vector of parent pointers using the
-     * algorithm of Gilbert et al., 2001, BIT.
-     */
-    static std::vector<Index> ComputeParents(const VariableIndex& structure);
-
-    /** add a factor, for Create use only */
-    void add(const GaussianFactor::shared_ptr& factor) { factors_.push_back(factor); }
-
-    /** add a subtree, for Create use only */
-    void add(const shared_ptr& child) { subTrees_.push_back(child); }
-
-  public:
-
-    /** return the key associated with this tree node */
-    Index key() const { return key_; }
-
-    /** return the const reference of children */
-    const SubTrees& children() const { return subTrees_; }
-
-    /** return the const reference to the factors */
-    const Factors& factors() const { return factors_; }
-
-    /** Create an elimination tree from a factor graph */
-    static std::vector<shared_ptr> Create(const GaussianFactorGraph& factorGraph, const VariableIndex& structure);
-
-    /** Recursive routine that eliminates the factors arranged in an elimination tree */
-    GaussianFactor::shared_ptr eliminateRecursive(GaussianFactorGraph::Eliminate function);
-
-    /** Recursive function that helps find the top of each tree */
-    static void removeChildrenIndices(std::set<Index>& indices, const EliminationForest::shared_ptr& tree);
-  };
-
   static NonlinearFactorGraph calculateMarginalFactors(const NonlinearFactorGraph& graph, const Values& theta,
       const std::set<Key>& marginalizeKeys, const GaussianFactorGraph::Eliminate& eliminateFunction);
@@ -213,9 +162,9 @@ private:
   static void PrintKeySet(const std::set<Key>& keys, const std::string& label);
   static void PrintKeySet(const gtsam::FastSet<Key>& keys, const std::string& label);
   static void PrintSymbolicFactor(const NonlinearFactor::shared_ptr& factor);
-  static void PrintSymbolicFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering);
+  static void PrintSymbolicFactor(const GaussianFactor::shared_ptr& factor);
   static void PrintSymbolicGraph(const NonlinearFactorGraph& graph, const std::string& label);
-  static void PrintSymbolicGraph(const GaussianFactorGraph& graph, const Ordering& ordering, const std::string& label);
+  static void PrintSymbolicGraph(const GaussianFactorGraph& graph, const std::string& label);
 }; // BatchFixedLagSmoother

 } /// namespace gtsam
@@ -20,9 +20,6 @@ ${gtsam_unstable-default})

 # Exclude tests that don't work
 set (nonlinear_excluded_tests #"")
-    "${CMAKE_CURRENT_SOURCE_DIR}/tests/testBatchFixedLagSmoother.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentBatchFilter.cpp"
-    "${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentBatchSmoother.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentIncrementalFilter.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentIncrementalSmootherDL.cpp"
     "${CMAKE_CURRENT_SOURCE_DIR}/tests/testConcurrentIncrementalSmootherGN.cpp"
@@ -61,7 +61,7 @@ void ConcurrentBatchFilter::PrintNonlinearFactorGraph(const NonlinearFactorGraph
 }

 /* ************************************************************************* */
-void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering,
+void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr& factor,
     const std::string& indent, const KeyFormatter& keyFormatter) {
   std::cout << indent;
   if(factor) {
@@ -74,8 +74,8 @@ void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr&
     } else {
       std::cout << "g( ";
     }
-    BOOST_FOREACH(Index index, *factor) {
-      std::cout << keyFormatter(ordering.key(index)) << " ";
+    BOOST_FOREACH(Key key, *factor) {
+      std::cout << keyFormatter(key) << " ";
     }
     std::cout << ")" << std::endl;
   } else {
@@ -84,11 +84,11 @@ void ConcurrentBatchFilter::PrintLinearFactor(const GaussianFactor::shared_ptr&
 }

 /* ************************************************************************* */
-void ConcurrentBatchFilter::PrintLinearFactorGraph(const GaussianFactorGraph& factors, const Ordering& ordering,
+void ConcurrentBatchFilter::PrintLinearFactorGraph(const GaussianFactorGraph& factors,
     const std::string& indent, const std::string& title, const KeyFormatter& keyFormatter) {
   std::cout << indent << title << std::endl;
   BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factors) {
-    PrintLinearFactor(factor, ordering, indent + " ", keyFormatter);
+    PrintLinearFactor(factor, indent + " ", keyFormatter);
   }
 }
@@ -149,20 +149,16 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto

   // Update all of the internal variables with the new information
   gttic(augment_system);

   // Add the new variables to theta
   theta_.insert(newTheta);
-  // Add new variables to the end of the ordering
-  std::vector<size_t> dims;
-  dims.reserve(newTheta.size());
-  BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
-    ordering_.push_back(key_value.key);
-    dims.push_back(key_value.value.dim());
-  }
   // Augment Delta
-  delta_.append(dims);
-  for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
-    delta_[i].setZero();
-  }
+  delta_.insert(newTheta.zeroVectors());

   // Add the new factors to the graph, updating the variable index
   result.newFactorsIndices = insertFactors(newFactors);
   // Remove any user-specified factors from the graph
@@ -358,10 +354,7 @@ void ConcurrentBatchFilter::removeFactors(const std::vector<size_t>& slots) {
   gttic(remove_factors);

   // For each factor slot to delete...
-  SymbolicFactorGraph factors;
   BOOST_FOREACH(size_t slot, slots) {
-    // Create a symbolic version for the variable index
-    factors.push_back(factors_.at(slot)->symbolic(ordering_));

     // Remove the factor from the graph
     factors_.remove(slot);
@@ -376,29 +369,13 @@ void ConcurrentBatchFilter::removeFactors(const std::vector<size_t>& slots) {
 /* ************************************************************************* */
 void ConcurrentBatchFilter::reorder(const boost::optional<FastList<Key> >& keysToMove) {

-  // Calculate the variable index
-  VariableIndex variableIndex(*factors_.symbolic(ordering_), ordering_.size());
-
-  // COLAMD groups will be used to place marginalize keys in Group 0, and everything else in Group 1
-  int group0 = 0;
-  int group1 = (keysToMove && (keysToMove->size() > 0) ) ? 1 : 0;
-
-  // Initialize all variables to group1
-  std::vector<int> cmember(variableIndex.size(), group1);
-
-  // Set all of the keysToMove to Group0
   if(keysToMove && keysToMove->size() > 0) {
-    BOOST_FOREACH(Key key, *keysToMove) {
-      cmember[ordering_.at(key)] = group0;
-    }
+    ordering_ = Ordering::COLAMDConstrainedFirst(factors_, std::vector<Key>(keysToMove->begin(), keysToMove->end()));
+  }else{
+    ordering_ = Ordering::COLAMD(factors_);
   }
-
-  // Generate the permutation
-  Permutation forwardPermutation = *inference::PermutationCOLAMD_(variableIndex, cmember);
-
-  // Permute the ordering, variable index, and deltas
-  ordering_.permuteInPlace(forwardPermutation);
-  delta_.permuteInPlace(forwardPermutation);
 }

 /* ************************************************************************* */
@@ -426,7 +403,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
   double errorTol = parameters.errorTol;

   // Create a Values that holds the current evaluation point
-  Values evalpoint = theta.retract(delta, ordering);
+  Values evalpoint = theta.retract(delta);
   result.error = factors.error(evalpoint);

   // check if we're already close enough
@@ -447,9 +424,9 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values

   // Do next iteration
   gttic(optimizer_iteration);
   {
     // Linearize graph around the linearization point
-    GaussianFactorGraph linearFactorGraph = *factors.linearize(theta, ordering);
+    GaussianFactorGraph linearFactorGraph = *factors.linearize(theta);

     // Keep increasing lambda until we make make progress
     while(true) {
@@ -461,24 +438,25 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
       GaussianFactorGraph dampedFactorGraph(linearFactorGraph);
       dampedFactorGraph.reserve(linearFactorGraph.size() + delta.size());
       double sigma = 1.0 / std::sqrt(lambda);
       {
-        // for each of the variables, add a prior at the current solution
-        for(size_t j=0; j<delta.size(); ++j) {
-          Matrix A = eye(delta[j].size());
-          Vector b = delta[j];
-          SharedDiagonal model = noiseModel::Isotropic::Sigma(delta[j].size(), sigma);
-          GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
-          dampedFactorGraph.push_back(prior);
-        }
-
+        // for each of the variables, add a prior at the current solution
+        BOOST_FOREACH(const VectorValues::KeyValuePair& key_value, delta) {
+          size_t dim = key_value.second.size();
+          Matrix A = Matrix::Identity(dim,dim);
+          Vector b = key_value.second;
+          SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, sigma);
+          GaussianFactor::shared_ptr prior(new JacobianFactor(key_value.first, A, b, model));
+          dampedFactorGraph.push_back(prior);
+        }
       }
       gttoc(damp);
       result.lambdas++;

       gttic(solve);
       // Solve Damped Gaussian Factor Graph
-      newDelta = GaussianJunctionTree(dampedFactorGraph).optimize(parameters.getEliminationFunction());
+      newDelta = dampedFactorGraph.optimize(ordering, parameters.getEliminationFunction());
       // update the evalpoint with the new delta
-      evalpoint = theta.retract(newDelta, ordering);
+      evalpoint = theta.retract(newDelta);
      gttoc(solve);

       // Evaluate the new nonlinear error
@@ -503,10 +481,10 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
       if(linearValues.size() > 0) {
         theta.update(linearValues);
         BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, linearValues) {
-          Index index = ordering.at(key_value.key);
-          delta.at(index) = newDelta.at(index);
+          delta.at(key_value.key) = newDelta.at(key_value.key);
         }
       }

       // Decrease lambda for next time
       lambda /= lambdaFactor;
       if(lambda < lambdaLowerBound) {
@@ -526,7 +504,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
         }
-      }
+      } // end while
     }

     gttoc(optimizer_iteration);

     if(debug) { std::cout << "using lambda = " << lambda << std::endl; }
@@ -562,13 +540,17 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {

   if(debug) { PrintKeys(keysToMove, "ConcurrentBatchFilter::moveSeparator ", "Keys To Move:", DefaultKeyFormatter); }

   // Identify all of the new factors to be sent to the smoother (any factor involving keysToMove)
   std::vector<size_t> removedFactorSlots;
-  VariableIndex variableIndex(*factors_.symbolic(ordering_), theta_.size());
+  VariableIndex variableIndex(factors_);
   BOOST_FOREACH(Key key, keysToMove) {
-    const FastList<size_t>& slots = variableIndex[ordering_.at(key)];
-    removedFactorSlots.insert(removedFactorSlots.end(), slots.begin(), slots.end());
+    const FastList<size_t>& slots = variableIndex[key];
+    BOOST_FOREACH(size_t slot, slots) {
+      removedFactorSlots.insert(removedFactorSlots.end(), slots.begin(), slots.end());
+    }
   }

   // Sort and remove duplicates
   std::sort(removedFactorSlots.begin(), removedFactorSlots.end());
   removedFactorSlots.erase(std::unique(removedFactorSlots.begin(), removedFactorSlots.end()), removedFactorSlots.end());
@@ -677,22 +659,8 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {
   // Remove marginalized keys from values (and separator)
   BOOST_FOREACH(Key key, keysToMove) {
     theta_.erase(key);
   }

-  // Permute the ordering such that the removed keys are at the end.
-  // This is a prerequisite for removing them from several structures
-  std::vector<Index> toBack;
-  BOOST_FOREACH(Key key, keysToMove) {
-    toBack.push_back(ordering_.at(key));
-  }
-  Permutation forwardPermutation = Permutation::PushToBack(toBack, ordering_.size());
-  ordering_.permuteInPlace(forwardPermutation);
-  delta_.permuteInPlace(forwardPermutation);
-
   // Remove marginalized keys from the ordering and delta
-  for(size_t i = 0; i < keysToMove.size(); ++i) {
-    ordering_.pop_back();
-    delta_.pop_back();
+  BOOST_FOREACH(Key key, keysToMove) {
+    ordering_.erase(std::find(ordering_.begin(), ordering_.end(), key));
+    delta_.erase(key);
   }

   if(debug) std::cout << "ConcurrentBatchFilter::moveSeparator End" << std::endl;
@@ -225,7 +225,7 @@ private:
       const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

   /** Print just the nonlinear keys in each linear factor for a whole Gaussian Factor Graph */
-  static void PrintLinearFactorGraph(const GaussianFactorGraph& factors, const Ordering& ordering,
+  static void PrintLinearFactorGraph(const GaussianFactorGraph& factors,
       const std::string& indent = "", const std::string& title = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

   /** Print just the nonlinear keys contained inside a container */
|
@ -58,18 +58,15 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
|
|||
{
|
||||
// Add the new variables to theta
|
||||
theta_.insert(newTheta);
|
||||
|
||||
// Add new variables to the end of the ordering
|
||||
std::vector<size_t> dims;
|
||||
dims.reserve(newTheta.size());
|
||||
BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, newTheta) {
|
||||
ordering_.push_back(key_value.key);
|
||||
dims.push_back(key_value.value.dim());
|
||||
}
|
||||
|
||||
// Augment Delta
|
||||
delta_.append(dims);
|
||||
for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
|
||||
delta_[i].setZero();
|
||||
}
|
||||
delta_.insert(newTheta.zeroVectors());
|
||||
|
||||
// Add the new factors to the graph, updating the variable index
|
||||
insertFactors(newFactors);
|
||||
}
|
||||
|
@@ -134,35 +131,29 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
   removeFactors(filterSummarizationSlots_);

   // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
-  std::vector<size_t> dims;
-  dims.reserve(smootherValues.size() + separatorValues.size());
   BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, smootherValues) {
-    Values::iterator iter = theta_.find(key_value.key);
-    if(iter == theta_.end()) {
-      theta_.insert(key_value.key, key_value.value);
+    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
+    if(iter_inserted.second) {
+      // If the insert succeeded
       ordering_.push_back(key_value.key);
-      dims.push_back(key_value.value.dim());
+      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
     } else {
-      iter->value = key_value.value;
+      // If the element already existed in theta_
+      iter_inserted.first->value = key_value.value;
     }
   }
   BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues) {
-    Values::iterator iter = theta_.find(key_value.key);
-    if(iter == theta_.end()) {
-      theta_.insert(key_value.key, key_value.value);
+    std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
+    if(iter_inserted.second) {
+      // If the insert succeeded
       ordering_.push_back(key_value.key);
-      dims.push_back(key_value.value.dim());
+      delta_.insert(key_value.key, Vector::Zero(key_value.value.dim()));
     } else {
-      iter->value = key_value.value;
+      // If the element already existed in theta_
+      iter_inserted.first->value = key_value.value;
    }
   }

-  // Augment Delta
-  delta_.append(dims);
-  for(size_t i = delta_.size() - dims.size(); i < delta_.size(); ++i) {
-    delta_[i].setZero();
-  }

   // Insert the new smoother factors
   insertFactors(smootherFactors);
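Values::tryInsert folds the previous find/insert pair into a single lookup whose bool reports whether the key was new, which is what lets the two branches above share one iterator. A sketch of the idiom as a hypothetical upsert helper (mirroring the commit's own usage):

    #include <gtsam/nonlinear/Values.h>

    using namespace gtsam;

    // Insert a linpoint, or overwrite it if the key already exists.
    void upsert(Values& theta, Key key, const Value& value) {
      // One lookup: .second is true if the key was newly inserted.
      std::pair<Values::iterator, bool> iter_inserted = theta.tryInsert(key, value);
      if(!iter_inserted.second)
        iter_inserted.first->value = value;  // key existed: overwrite the linpoint
    }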
@@ -217,10 +208,7 @@ void ConcurrentBatchSmoother::removeFactors(const std::vector<size_t>& slots) {
   gttic(remove_factors);

   // For each factor slot to delete...
-  SymbolicFactorGraph factors;
   BOOST_FOREACH(size_t slot, slots) {
-    // Create a symbolic version for the variable index
-    factors.push_back(factors_.at(slot)->symbolic(ordering_));

     // Remove the factor from the graph
     factors_.remove(slot);
@@ -236,25 +224,11 @@ void ConcurrentBatchSmoother::removeFactors(const std::vector<size_t>& slots) {
 void ConcurrentBatchSmoother::reorder() {

   // Recalculate the variable index
-  variableIndex_ = VariableIndex(*factors_.symbolic(ordering_));
+  variableIndex_ = VariableIndex(factors_);

-  // Initialize all variables to group0
-  std::vector<int> cmember(variableIndex_.size(), 0);
+  FastList<Key> separatorKeys = separatorValues_.keys();
+  ordering_ = Ordering::COLAMDConstrainedLast(variableIndex_, std::vector<Key>(separatorKeys.begin(), separatorKeys.end()));

-  // Set all of the separator keys to Group1
-  if(separatorValues_.size() > 0) {
-    BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) {
-      cmember[ordering_.at(key_value.key)] = 1;
-    }
-  }
-
-  // Generate the permutation
-  Permutation forwardPermutation = *inference::PermutationCOLAMD_(variableIndex_, cmember);
-
-  // Permute the ordering, variable index, and deltas
-  ordering_.permuteInPlace(forwardPermutation);
-  variableIndex_.permuteInPlace(forwardPermutation);
-  delta_.permuteInPlace(forwardPermutation);
 }

 /* ************************************************************************* */
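The smoother constrains the opposite end of the ordering: separator variables are pushed to the back so everything interior is eliminated first and the summarized factors land on the separator. A sketch of the call (assuming the GTSAM 3-era spelling COLAMDConstrainedLast, as used above):

    #include <vector>
    #include <gtsam/inference/Ordering.h>
    #include <gtsam/inference/VariableIndex.h>

    using namespace gtsam;

    Ordering separatorLastOrdering(const VariableIndex& variableIndex,
                                   const std::vector<Key>& separatorKeys) {
      // Separator keys are constrained to be eliminated last.
      return Ordering::COLAMDConstrainedLast(variableIndex, separatorKeys);
    }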
@@ -271,7 +245,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
   double lambda = parameters_.lambdaInitial;

   // Create a Values that holds the current evaluation point
-  Values evalpoint = theta_.retract(delta_, ordering_);
+  Values evalpoint = theta_.retract(delta_);
   result.error = factors_.error(evalpoint);
   if(result.error < parameters_.errorTol) {
     return result;
@@ -287,7 +261,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
     gttic(optimizer_iteration);
     {
       // Linearize graph around the linearization point
-      GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_, ordering_);
+      GaussianFactorGraph linearFactorGraph = *factors_.linearize(theta_);

       // Keep increasing lambda until we make make progress
       while(true) {
@@ -300,11 +274,12 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
         dampedFactorGraph.reserve(linearFactorGraph.size() + delta_.size());
         {
           // for each of the variables, add a prior at the current solution
-          for(size_t j=0; j<delta_.size(); ++j) {
-            Matrix A = eye(delta_[j].size());
-            Vector b = delta_[j];
-            SharedDiagonal model = noiseModel::Isotropic::Sigma(delta_[j].size(), 1.0 / std::sqrt(lambda));
-            GaussianFactor::shared_ptr prior(new JacobianFactor(j, A, b, model));
+          BOOST_FOREACH(const VectorValues::KeyValuePair& key_value, delta_) {
+            size_t dim = key_value.second.size();
+            Matrix A = Matrix::Identity(dim,dim);
+            Vector b = key_value.second;
+            SharedDiagonal model = noiseModel::Isotropic::Sigma(dim, 1.0 / std::sqrt(lambda));
+            GaussianFactor::shared_ptr prior(new JacobianFactor(key_value.first, A, b, model));
             dampedFactorGraph.push_back(prior);
           }
         }
@@ -312,12 +287,12 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
         if (lmVerbosity >= LevenbergMarquardtParams::DAMPED)
           dampedFactorGraph.print("damped");
         result.lambdas++;

         gttic(solve);
         // Solve Damped Gaussian Factor Graph
-        newDelta = GaussianJunctionTree(dampedFactorGraph).optimize(parameters_.getEliminationFunction());
+        newDelta = dampedFactorGraph.optimize(ordering_, parameters_.getEliminationFunction());
         // update the evalpoint with the new delta
-        evalpoint = theta_.retract(newDelta, ordering_);
+        evalpoint = theta_.retract(newDelta);
         gttoc(solve);

         if (lmVerbosity >= LevenbergMarquardtParams::TRYLAMBDA)
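The explicit GaussianJunctionTree construction disappears in both filter and smoother: GaussianFactorGraph::optimize runs multifrontal elimination internally, taking the ordering and elimination function directly. A sketch of the one-line solve (assuming EliminatePreferCholesky, one of the stock elimination functions; the real code passes parameters_.getEliminationFunction()):

    #include <gtsam/inference/Ordering.h>
    #include <gtsam/linear/GaussianFactorGraph.h>
    #include <gtsam/linear/VectorValues.h>

    using namespace gtsam;

    VectorValues solveDamped(const GaussianFactorGraph& damped,
                             const Ordering& ordering) {
      // Builds the junction tree and back-substitutes under the hood.
      return damped.optimize(ordering, EliminatePreferCholesky);
    }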
@@ -345,10 +320,10 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
           if(separatorValues_.size() > 0) {
             theta_.update(separatorValues_);
             BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, separatorValues_) {
-              Index index = ordering_.at(key_value.key);
-              delta_.at(index) = newDelta.at(index);
+              delta_.at(key_value.key) = newDelta.at(key_value.key);
             }
           }

           // Decrease lambda for next time
           lambda /= parameters_.lambdaFactor;
           // End this lambda search iteration
@@ -421,12 +396,12 @@ void ConcurrentBatchSmoother::PrintNonlinearFactor(const NonlinearFactor::shared
 }

 /* ************************************************************************* */
-void ConcurrentBatchSmoother::PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering, const std::string& indent, const KeyFormatter& keyFormatter) {
+void ConcurrentBatchSmoother::PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const std::string& indent, const KeyFormatter& keyFormatter) {
   std::cout << indent;
   if(factor) {
     std::cout << "g( ";
-    BOOST_FOREACH(Index index, *factor) {
-      std::cout << keyFormatter(ordering.key(index)) << " ";
+    BOOST_FOREACH(Key key, *factor) {
+      std::cout << keyFormatter(key) << " ";
     }
     std::cout << ")" << std::endl;
   } else {
@@ -99,8 +99,7 @@ public:
    */
   template<class VALUE>
   VALUE calculateEstimate(Key key) const {
-    const Index index = ordering_.at(key);
-    const Vector delta = delta_.at(index);
+    const Vector delta = delta_.at(key);
     return theta_.at<VALUE>(key).retract(delta);
   }
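Caller-side nothing changes, but the lookup is now a single key-indexed access rather than an Ordering translation followed by a slot access. Usage stays the familiar templated form (the smoother and key names here are illustrative):

    // Retract the per-variable delta onto the stored linearization point.
    Pose3 estimate = smoother.calculateEstimate<Pose3>(key);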
@@ -196,7 +195,7 @@ private:
       const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

   /** Print just the nonlinear keys in a linear factor */
-  static void PrintLinearFactor(const GaussianFactor::shared_ptr& factor, const Ordering& ordering,
+  static void PrintLinearFactor(const GaussianFactor::shared_ptr& factor,
       const std::string& indent = "", const KeyFormatter& keyFormatter = DefaultKeyFormatter);

 }; // ConcurrentBatchSmoother
@@ -50,151 +50,11 @@ void synchronize(ConcurrentFilter& filter, ConcurrentSmoother& smoother) {

 namespace internal {

-// TODO: Remove this and replace with the standard Elimination Tree once GTSAM 3.0 is released and supports forests
-// A custom elimination tree that supports forests and partial elimination
-class EliminationForest {
-public:
-  typedef boost::shared_ptr<EliminationForest> shared_ptr; ///< Shared pointer to this class
-
-private:
-  typedef FastList<GaussianFactor::shared_ptr> Factors;
-  typedef FastList<shared_ptr> SubTrees;
-  typedef std::vector<GaussianConditional::shared_ptr> Conditionals;
-
-  Index key_; ///< index associated with root
-  Factors factors_; ///< factors associated with root
-  SubTrees subTrees_; ///< sub-trees
-
-  /** default constructor, private, as you should use Create below */
-  EliminationForest(Index key = 0) : key_(key) {}
-
-  /**
-   * Static internal function to build a vector of parent pointers using the
-   * algorithm of Gilbert et al., 2001, BIT.
-   */
-  static std::vector<Index> ComputeParents(const VariableIndex& structure);
-
-  /** add a factor, for Create use only */
-  void add(const GaussianFactor::shared_ptr& factor) { factors_.push_back(factor); }
-
-  /** add a subtree, for Create use only */
-  void add(const shared_ptr& child) { subTrees_.push_back(child); }
-
-public:
-
-  /** return the key associated with this tree node */
-  Index key() const { return key_; }
-
-  /** return the const reference of children */
-  const SubTrees& children() const { return subTrees_; }
-
-  /** return the const reference to the factors */
-  const Factors& factors() const { return factors_; }
-
-  /** Create an elimination tree from a factor graph */
-  static std::vector<shared_ptr> Create(const GaussianFactorGraph& factorGraph, const VariableIndex& structure);
-
-  /** Recursive routine that eliminates the factors arranged in an elimination tree */
-  GaussianFactor::shared_ptr eliminateRecursive(GaussianFactorGraph::Eliminate function);
-
-  /** Recursive function that helps find the top of each tree */
-  static void removeChildrenIndices(std::set<Index>& indices, const EliminationForest::shared_ptr& tree);
-};
-
-/* ************************************************************************* */
-std::vector<Index> EliminationForest::ComputeParents(const VariableIndex& structure) {
-  // Number of factors and variables
-  const size_t m = structure.nFactors();
-  const size_t n = structure.size();
-
-  static const Index none = std::numeric_limits<Index>::max();
-
-  // Allocate result parent vector and vector of last factor columns
-  std::vector<Index> parents(n, none);
-  std::vector<Index> prevCol(m, none);
-
-  // for column j \in 1 to n do
-  for (Index j = 0; j < n; j++) {
-    // for row i \in Struct[A*j] do
-    BOOST_FOREACH(const size_t i, structure[j]) {
-      if (prevCol[i] != none) {
-        Index k = prevCol[i];
-        // find root r of the current tree that contains k
-        Index r = k;
-        while (parents[r] != none)
-          r = parents[r];
-        if (r != j) parents[r] = j;
-      }
-      prevCol[i] = j;
-    }
-  }
-
-  return parents;
-}
-
-/* ************************************************************************* */
-std::vector<EliminationForest::shared_ptr> EliminationForest::Create(const GaussianFactorGraph& factorGraph, const VariableIndex& structure) {
-  // Compute the tree structure
-  std::vector<Index> parents(ComputeParents(structure));
-
-  // Number of variables
-  const size_t n = structure.size();
-
-  static const Index none = std::numeric_limits<Index>::max();
-
-  // Create tree structure
-  std::vector<shared_ptr> trees(n);
-  for (Index k = 1; k <= n; k++) {
-    Index j = n - k; // Start at the last variable and loop down to 0
-    trees[j].reset(new EliminationForest(j)); // Create a new node on this variable
-    if (parents[j] != none) // If this node has a parent, add it to the parent's children
-      trees[parents[j]]->add(trees[j]);
-  }
-
-  // Hang factors in right places
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, factorGraph) {
-    if(factor && factor->size() > 0) {
-      Index j = *std::min_element(factor->begin(), factor->end());
-      if(j < structure.size())
-        trees[j]->add(factor);
-    }
-  }
-
-  return trees;
-}
-
-/* ************************************************************************* */
-GaussianFactor::shared_ptr EliminationForest::eliminateRecursive(GaussianFactorGraph::Eliminate function) {
-
-  // Create the list of factors to be eliminated, initially empty, and reserve space
-  GaussianFactorGraph factors;
-  factors.reserve(this->factors_.size() + this->subTrees_.size());
-
-  // Add all factors associated with the current node
-  factors.push_back(this->factors_.begin(), this->factors_.end());
-
-  // for all subtrees, eliminate into Bayes net and a separator factor, added to [factors]
-  BOOST_FOREACH(const shared_ptr& child, subTrees_)
-    factors.push_back(child->eliminateRecursive(function));
-
-  // Combine all factors (from this node and from subtrees) into a joint factor
-  GaussianFactorGraph::EliminationResult eliminated(function(factors, 1));
-
-  return eliminated.second;
-}
-
-/* ************************************************************************* */
-void EliminationForest::removeChildrenIndices(std::set<Index>& indices, const EliminationForest::shared_ptr& tree) {
-  BOOST_FOREACH(const EliminationForest::shared_ptr& child, tree->children()) {
-    indices.erase(child->key());
-    removeChildrenIndices(indices, child);
-  }
-}
-
 /* ************************************************************************* */
 NonlinearFactorGraph calculateMarginalFactors(const NonlinearFactorGraph& graph, const Values& theta,
     const FastSet<Key>& remainingKeys, const GaussianFactorGraph::Eliminate& eliminateFunction) {

   // Calculate the set of RootKeys = AllKeys \Intersect RemainingKeys
   FastSet<Key> rootKeys;
   FastSet<Key> allKeys(graph.keys());
@@ -208,59 +68,16 @@ NonlinearFactorGraph calculateMarginalFactors(const NonlinearFactorGraph& graph,
     // There are no keys to marginalize. Simply return the input factors
     return graph;
   } else {
-    // Create a subset of theta that only contains the required keys
-    Values values;
-    BOOST_FOREACH(Key key, allKeys) {
-      values.insert(key, theta.at(key));
-    }
-
-    // Calculate the ordering: [Others Root]
-    std::map<Key, int> constraints;
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      constraints[key] = 0;
-    }
-    BOOST_FOREACH(Key key, rootKeys) {
-      constraints[key] = 1;
-    }
-    Ordering ordering = *graph.orderingCOLAMDConstrained(values, constraints);
-
     // Create the linear factor graph
-    GaussianFactorGraph linearFactorGraph = *graph.linearize(values, ordering);
-
-    // Construct a variable index
-    VariableIndex variableIndex(linearFactorGraph, ordering.size());
-
-    // Construct an elimination tree to perform sparse elimination
-    std::vector<EliminationForest::shared_ptr> forest( EliminationForest::Create(linearFactorGraph, variableIndex) );
-
-    // This is a forest. Only the top-most node/index of each tree needs to be eliminated; all of the children will be eliminated automatically
-    // Find the subset of nodes/keys that must be eliminated
-    std::set<Index> indicesToEliminate;
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      indicesToEliminate.insert(ordering.at(key));
-    }
-    BOOST_FOREACH(Key key, marginalizeKeys) {
-      EliminationForest::removeChildrenIndices(indicesToEliminate, forest.at(ordering.at(key)));
-    }
-
-    // Eliminate each top-most key, returning a Gaussian Factor on some of the remaining variables
-    // Convert the marginal factors into Linear Container Factors
+    GaussianFactorGraph linearFactorGraph = *graph.linearize(theta);
+    // .first is the eliminated Bayes tree, while .second is the remaining factor graph
+    GaussianFactorGraph marginalLinearFactors = *linearFactorGraph.eliminatePartialMultifrontal(std::vector<Key>(marginalizeKeys.begin(), marginalizeKeys.end())).second;

+    // Wrap in nonlinear container factors
     NonlinearFactorGraph marginalFactors;
-    BOOST_FOREACH(Index index, indicesToEliminate) {
-      GaussianFactor::shared_ptr gaussianFactor = forest.at(index)->eliminateRecursive(eliminateFunction);
-      if(gaussianFactor->size() > 0) {
-        LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering, values));
-        marginalFactors.push_back(marginalFactor);
-      }
-    }
-
-    // Also add any remaining factors that were unaffected by marginalizing out the selected variables.
-    // These are part of the marginal on the remaining variables as well.
-    BOOST_FOREACH(Key key, rootKeys) {
-      BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, forest.at(ordering.at(key))->factors()) {
-        LinearContainerFactor::shared_ptr marginalFactor(new LinearContainerFactor(gaussianFactor, ordering, values));
-        marginalFactors.push_back(marginalFactor);
-      }
-    }
+    marginalFactors.reserve(marginalLinearFactors.size());
+    BOOST_FOREACH(const GaussianFactor::shared_ptr& gaussianFactor, marginalLinearFactors) {
+      marginalFactors += boost::make_shared<LinearContainerFactor>(gaussianFactor, theta);
+    }

     return marginalFactors;
@@ -23,6 +23,7 @@
 #include <gtsam_unstable/base/dllexport.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
+#include <gtsam/nonlinear/Values.h>
 #include <gtsam/linear/GaussianFactorGraph.h>

 namespace gtsam {
@@ -32,6 +32,7 @@
 using namespace std;
 using namespace gtsam;


 /* ************************************************************************* */
 bool check_smoother(const NonlinearFactorGraph& fullgraph, const Values& fullinit, const BatchFixedLagSmoother& smoother, const Key& key) {
@@ -83,7 +84,7 @@ TEST( BatchFixedLagSmoother, Example )
     Values newValues;
     Timestamps newTimestamps;

-    newFactors.add(PriorFactor<Point2>(key0, Point2(0.0, 0.0), odometerNoise));
+    newFactors.push_back(PriorFactor<Point2>(key0, Point2(0.0, 0.0), odometerNoise));
     newValues.insert(key0, Point2(0.01, 0.01));
     newTimestamps[key0] = 0.0;

@@ -108,7 +109,7 @@ TEST( BatchFixedLagSmoother, Example )
     Values newValues;
     Timestamps newTimestamps;

-    newFactors.add(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
+    newFactors.push_back(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
     newValues.insert(key2, Point2(double(i)+0.1, -0.1));
     newTimestamps[key2] = double(i);

@@ -134,8 +135,8 @@ TEST( BatchFixedLagSmoother, Example )
     Values newValues;
     Timestamps newTimestamps;

-    newFactors.add(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
-    newFactors.add(BetweenFactor<Point2>(Key(2), Key(5), Point2(3.5, 0.0), loopNoise));
+    newFactors.push_back(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
+    newFactors.push_back(BetweenFactor<Point2>(Key(2), Key(5), Point2(3.5, 0.0), loopNoise));
     newValues.insert(key2, Point2(double(i)+0.1, -0.1));
     newTimestamps[key2] = double(i);

@@ -160,7 +161,7 @@ TEST( BatchFixedLagSmoother, Example )
     Values newValues;
     Timestamps newTimestamps;

-    newFactors.add(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
+    newFactors.push_back(BetweenFactor<Point2>(key1, key2, Point2(1.0, 0.0), odometerNoise));
     newValues.insert(key2, Point2(double(i)+0.1, -0.1));
     newTimestamps[key2] = double(i);
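The tests switch from add() to the templated push_back(), which takes the factor by value and stores it behind a shared pointer; the two spellings are interchangeable here, and push_back appears to be the one this port standardizes on. A fragment of the pattern, reusing the test's own names (newFactors, key0, odometerNoise):

    // newFactors is a NonlinearFactorGraph built up by the test fixture.
    newFactors.push_back(PriorFactor<Point2>(key0, Point2(0.0, 0.0), odometerNoise));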
|
@ -23,10 +23,10 @@
|
|||
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
|
||||
#include <gtsam/nonlinear/NonlinearFactorGraph.h>
|
||||
#include <gtsam/nonlinear/LinearContainerFactor.h>
|
||||
#include <gtsam/nonlinear/Ordering.h>
|
||||
#include <gtsam/nonlinear/Values.h>
|
||||
#include <gtsam/nonlinear/Symbol.h>
|
||||
#include <gtsam/nonlinear/Key.h>
|
||||
#include <gtsam/inference/Ordering.h>
|
||||
#include <gtsam/inference/Key.h>
|
||||
#include <gtsam/inference/JunctionTree.h>
|
||||
#include <gtsam/geometry/Pose3.h>
|
||||
#include <gtsam/base/TestableAssertions.h>
|
||||
|
@@ -79,19 +79,13 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
     ordering.push_back(key);
   }

-  GaussianFactorGraph linearGraph = *factorGraph.linearize(linPoint, ordering);
+  GaussianFactorGraph linearGraph = *factorGraph.linearize(linPoint);

-  // Create the set of marginalizable variables
-  std::vector<Index> linearIndices;
-  BOOST_FOREACH(Key key, keysToMarginalize) {
-    linearIndices.push_back(ordering.at(key));
-  }
-
-  std::pair<GaussianConditional::shared_ptr, GaussianFactorGraph> marginal = linearGraph.eliminate(linearIndices, EliminateCholesky);
+  GaussianFactorGraph marginal = *linearGraph.eliminatePartialMultifrontal(vector<Key>(keysToMarginalize.begin(), keysToMarginalize.end()), EliminateCholesky).second;

   NonlinearFactorGraph LinearContainerForGaussianMarginals;
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, marginal.second) {
-    LinearContainerForGaussianMarginals.add(LinearContainerFactor(factor, ordering, linPoint));
+  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, marginal) {
+    LinearContainerForGaussianMarginals.push_back(LinearContainerFactor(factor, linPoint));
   }
   return LinearContainerForGaussianMarginals;
 }
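The CalculateMarginals helper shows the substance of the port: integer Index bookkeeping through an Ordering disappears, eliminatePartialMultifrontal is called directly on Keys, and LinearContainerFactor is constructed without an Ordering. A condensed sketch of the pattern, with factorGraph, linPoint, and keysToMarginalize assumed in scope as in the helper above:

    // Linearize at the current linearization point; no Ordering argument needed.
    GaussianFactorGraph linearGraph = *factorGraph.linearize(linPoint);

    // Eliminate the marginalized keys; .second is the factor graph left over
    // on the remaining variables, i.e. the desired marginal.
    std::vector<Key> marginalizeKeys(keysToMarginalize.begin(), keysToMarginalize.end());
    GaussianFactorGraph marginal =
        *linearGraph.eliminatePartialMultifrontal(marginalizeKeys, EliminateCholesky).second;

    // Re-wrap each linear factor so it can be stored in a NonlinearFactorGraph.
    NonlinearFactorGraph marginals;
    BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, marginal) {
      marginals.push_back(LinearContainerFactor(factor, linPoint));
    }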
@@ -140,8 +134,8 @@ TEST( ConcurrentBatchFilter, getFactors )

   // Add some factors to the filter
   NonlinearFactorGraph newFactors1;
-  newFactors1.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors1.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors1.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors1.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues1;
   newValues1.insert(1, Pose3());
   newValues1.insert(2, newValues1.at<Pose3>(1).compose(poseOdometry));

@@ -157,8 +151,8 @@ TEST( ConcurrentBatchFilter, getFactors )

   // Add some more factors to the filter
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors2.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(3, newValues1.at<Pose3>(2).compose(poseOdometry));
   newValues2.insert(4, newValues2.at<Pose3>(3).compose(poseOdometry));

@@ -190,8 +184,8 @@ TEST( ConcurrentBatchFilter, getLinearizationPoint )

   // Add some factors to the filter
   NonlinearFactorGraph newFactors1;
-  newFactors1.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors1.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors1.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors1.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues1;
   newValues1.insert(1, Pose3());
   newValues1.insert(2, newValues1.at<Pose3>(1).compose(poseOdometry));

@@ -207,8 +201,8 @@ TEST( ConcurrentBatchFilter, getLinearizationPoint )

   // Add some more factors to the filter
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors2.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(3, newValues1.at<Pose3>(2).compose(poseOdometry));
   newValues2.insert(4, newValues2.at<Pose3>(3).compose(poseOdometry));

@@ -252,8 +246,8 @@ TEST( ConcurrentBatchFilter, calculateEstimate )

   // Add some factors to the filter
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors2.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors2.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors2.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(1, Pose3().compose(poseError));
   newValues2.insert(2, newValues2.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -272,8 +266,8 @@ TEST( ConcurrentBatchFilter, calculateEstimate )

   // Add some more factors to the filter
   NonlinearFactorGraph newFactors3;
-  newFactors3.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors3.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues3;
   newValues3.insert(3, newValues2.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   newValues3.insert(4, newValues3.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -336,8 +330,8 @@ TEST( ConcurrentBatchFilter, update_multiple )

   // Add some factors to the filter
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors2.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors2.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors2.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(1, Pose3().compose(poseError));
   newValues2.insert(2, newValues2.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -356,8 +350,8 @@ TEST( ConcurrentBatchFilter, update_multiple )

   // Add some more factors to the filter
   NonlinearFactorGraph newFactors3;
-  newFactors3.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors3.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues3;
   newValues3.insert(3, newValues2.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   newValues3.insert(4, newValues3.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -386,10 +380,10 @@ TEST( ConcurrentBatchFilter, update_and_marginalize )

   // Add some factors to the filter
   NonlinearFactorGraph newFactors;
-  newFactors.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
-  newFactors.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues;
   newValues.insert(1, Pose3().compose(poseError));
   newValues.insert(2, newValues.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -409,9 +403,9 @@ TEST( ConcurrentBatchFilter, update_and_marginalize )

   NonlinearFactorGraph partialGraph;
-  partialGraph.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  partialGraph.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
-  partialGraph.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  partialGraph.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  partialGraph.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  partialGraph.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));

   Values partialValues;
   partialValues.insert(1, optimalValues.at<Pose3>(1));

@@ -425,12 +419,12 @@ TEST( ConcurrentBatchFilter, update_and_marginalize )
   ordering.push_back(3);

   // Create the set of marginalizable variables
-  std::vector<Index> linearIndices;
-  linearIndices.push_back(ordering.at(1));
-  linearIndices.push_back(ordering.at(2));
+  std::vector<Key> linearIndices;
+  linearIndices.push_back(1);
+  linearIndices.push_back(2);

-  GaussianFactorGraph linearPartialGraph = *partialGraph.linearize(partialValues, ordering);
-  std::pair<GaussianConditional::shared_ptr, GaussianFactorGraph> result = linearPartialGraph.eliminate(linearIndices, EliminateCholesky);
+  GaussianFactorGraph linearPartialGraph = *partialGraph.linearize(partialValues);
+  GaussianFactorGraph result = *linearPartialGraph.eliminatePartialMultifrontal(linearIndices, EliminateCholesky).second;

   NonlinearFactorGraph expectedGraph;

@@ -443,10 +437,10 @@ TEST( ConcurrentBatchFilter, update_and_marginalize )
   expectedGraph.push_back(NonlinearFactor::shared_ptr());
   expectedGraph.push_back(NonlinearFactor::shared_ptr());
   // ==========================================================
-  expectedGraph.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  expectedGraph.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));

-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result.second) {
-    expectedGraph.add(LinearContainerFactor(factor, ordering, partialValues));
+  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result) {
+    expectedGraph.push_back(LinearContainerFactor(factor, partialValues));
   }
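One detail worth noting in update_and_marginalize: the expected graph pads the slots of marginalized factors with empty shared pointers, since the filter keeps factor indices stable rather than compacting the graph. A sketch of that convention, assuming keys 1 and 2 were marginalized out of the four-factor chain built above:

    NonlinearFactorGraph expectedGraph;
    // Null slots stand in for the three factors that touched keys 1 or 2.
    expectedGraph.push_back(NonlinearFactor::shared_ptr());  // was the prior on 1
    expectedGraph.push_back(NonlinearFactor::shared_ptr());  // was between(1, 2)
    expectedGraph.push_back(NonlinearFactor::shared_ptr());  // was between(2, 3)
    // The surviving factor keeps its slot...
    expectedGraph.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
    // ...followed by the LinearContainerFactor marginals produced by elimination.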
@@ -512,8 +506,8 @@ TEST( ConcurrentBatchFilter, synchronize_1 )
   // Insert factors into the filter, but do not marginalize out any variables.
   // The synchronization should still be empty
   NonlinearFactorGraph newFactors;
-  newFactors.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues;
   newValues.insert(1, Pose3().compose(poseError));
   newValues.insert(2, newValues.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -667,7 +661,7 @@ TEST( ConcurrentBatchFilter, synchronize_3 )
   expectedFilterSeparatorValues.insert(2, optimalValues.at<Pose3>(2));
   // ------------------------------------------------------------------------------
   NonlinearFactorGraph partialGraph;
-  partialGraph.add(factor3);
+  partialGraph.push_back(factor3);

   Values partialValues;
   partialValues.insert(2, optimalValues.at<Pose3>(2));

@@ -752,7 +746,7 @@ TEST( ConcurrentBatchFilter, synchronize_4 )
   expectedFilterSeparatorValues.insert(2, optimalValuesFilter.at<Pose3>(2));
   // ------------------------------------------------------------------------------
   NonlinearFactorGraph partialGraphFilter;
-  partialGraphFilter.add(factor3);
+  partialGraphFilter.push_back(factor3);

   Values partialValuesFilter;
   partialValuesFilter.insert(2, optimalValuesFilter.at<Pose3>(2));

@@ -842,8 +836,8 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   filter.postsync();

   NonlinearFactorGraph expectedSmootherFactors;
-  expectedSmootherFactors.add(factor1);
-  expectedSmootherFactors.add(factor2);
+  expectedSmootherFactors.push_back(factor1);
+  expectedSmootherFactors.push_back(factor2);

   Values optimalValues = BatchOptimize(newFactors, newValues, 1);
   Values expectedSmootherValues;

@@ -875,11 +869,11 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   // currently the smoother contains factor 1 and 2 and node 1 and 2

   NonlinearFactorGraph partialGraph;
-  partialGraph.add(factor1);
-  partialGraph.add(factor2);
+  partialGraph.push_back(factor1);
+  partialGraph.push_back(factor2);

   // we also assume that the smoother received an extra factor (e.g., a prior on 1)
-  partialGraph.add(factor1);
+  partialGraph.push_back(factor1);

   Values partialValues;
   // Batch optimization updates all linearization points but the ones of the separator

@@ -908,8 +902,8 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   filter.postsync();

   NonlinearFactorGraph expectedSmootherFactors2;
-  expectedSmootherFactors2.add(factor3);
-  expectedSmootherFactors2.add(factor4);
+  expectedSmootherFactors2.push_back(factor3);
+  expectedSmootherFactors2.push_back(factor4);

   Values expectedSmootherValues2;
   expectedSmootherValues2.insert(2, optimalValues.at(2));

@@ -924,7 +918,7 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   // ------------------------------------------------------------------------------
   // This cannot be nonempty for the first call to synchronize
   NonlinearFactorGraph partialGraphFilter;
-  partialGraphFilter.add(factor5);
+  partialGraphFilter.push_back(factor5);

   Values partialValuesFilter;

@@ -944,8 +938,8 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   // Now we should check that the smooother summarization on the old separator is correctly propagated
   // on the new separator by the filter
   NonlinearFactorGraph partialGraphTransition;
-  partialGraphTransition.add(factor3);
-  partialGraphTransition.add(factor4);
+  partialGraphTransition.push_back(factor3);
+  partialGraphTransition.push_back(factor4);
   partialGraphTransition.push_back(smootherSummarization2);

   Values partialValuesTransition;

@@ -967,7 +961,7 @@ TEST( ConcurrentBatchFilter, synchronize_5 )
   expectedFilterGraph.push_back(factorEmpty);
   expectedFilterGraph.push_back(factorEmpty);
   expectedFilterGraph.push_back(factorEmpty);
-  expectedFilterGraph.add(factor5);
+  expectedFilterGraph.push_back(factor5);
   expectedFilterGraph.push_back(CalculateMarginals(partialGraphTransition, partialValuesTransition, keysToMarginalize3));

   NonlinearFactorGraph actualFilterGraph;

@@ -987,10 +981,10 @@ TEST( ConcurrentBatchFilter, CalculateMarginals_1 )
   NonlinearFactor::shared_ptr factor4(new BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));

   NonlinearFactorGraph factorGraph;
-  factorGraph.add(factor1);
-  factorGraph.add(factor2);
-  factorGraph.add(factor3);
-  factorGraph.add(factor4);
+  factorGraph.push_back(factor1);
+  factorGraph.push_back(factor2);
+  factorGraph.push_back(factor3);
+  factorGraph.push_back(factor4);

   Values newValues;
   Pose3 value1 = Pose3().compose(poseError);

@@ -1008,17 +1002,17 @@ TEST( ConcurrentBatchFilter, CalculateMarginals_1 )
   ordering.push_back(2);
   ordering.push_back(3);

-  GaussianFactorGraph linearFactorGraph = *factorGraph.linearize(newValues, ordering);
+  GaussianFactorGraph linearFactorGraph = *factorGraph.linearize(newValues);

   // Create the set of marginalizable variables
-  std::vector<Index> linearIndices;
-  linearIndices.push_back(ordering.at(1));
+  std::vector<Key> linearIndices;
+  linearIndices.push_back(1);

-  std::pair<GaussianConditional::shared_ptr, GaussianFactorGraph> result = linearFactorGraph.eliminate(linearIndices, EliminateCholesky);
+  GaussianFactorGraph result = *linearFactorGraph.eliminatePartialMultifrontal(linearIndices, EliminateCholesky).second;

   NonlinearFactorGraph expectedMarginals;
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result.second) {
-    expectedMarginals.add(LinearContainerFactor(factor, ordering, newValues));
+  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result) {
+    expectedMarginals.push_back(LinearContainerFactor(factor, newValues));
   }

@@ -1041,10 +1035,10 @@ TEST( ConcurrentBatchFilter, CalculateMarginals_2 )
   NonlinearFactor::shared_ptr factor4(new BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));

   NonlinearFactorGraph factorGraph;
-  factorGraph.add(factor1);
-  factorGraph.add(factor2);
-  factorGraph.add(factor3);
-  factorGraph.add(factor4);
+  factorGraph.push_back(factor1);
+  factorGraph.push_back(factor2);
+  factorGraph.push_back(factor3);
+  factorGraph.push_back(factor4);

   Values newValues;
   Pose3 value1 = Pose3().compose(poseError);

@@ -1062,18 +1056,18 @@ TEST( ConcurrentBatchFilter, CalculateMarginals_2 )
   ordering.push_back(2);
   ordering.push_back(3);

-  GaussianFactorGraph linearFactorGraph = *factorGraph.linearize(newValues, ordering);
+  GaussianFactorGraph linearFactorGraph = *factorGraph.linearize(newValues);

   // Create the set of marginalizable variables
-  std::vector<Index> linearIndices;
-  linearIndices.push_back(ordering.at(1));
-  linearIndices.push_back(ordering.at(2));
+  std::vector<Key> linearIndices;
+  linearIndices.push_back(1);
+  linearIndices.push_back(2);

-  std::pair<GaussianConditional::shared_ptr, GaussianFactorGraph> result = linearFactorGraph.eliminate(linearIndices, EliminateCholesky);
+  GaussianFactorGraph result = *linearFactorGraph.eliminatePartialMultifrontal(linearIndices, EliminateCholesky).second;

   NonlinearFactorGraph expectedMarginals;
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result.second) {
-    expectedMarginals.add(LinearContainerFactor(factor, ordering, newValues));
+  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result) {
+    expectedMarginals.push_back(LinearContainerFactor(factor, newValues));
   }
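The two CalculateMarginals tests make the index-to-key migration explicit: the Ordering the tests build becomes vestigial for linearization, because elimination now consumes Keys directly. A side-by-side sketch of the change, with linearFactorGraph assumed in scope as above:

    // Before (ordered GTSAM): keys had to be translated to integer indices first.
    //   std::vector<Index> linearIndices;
    //   linearIndices.push_back(ordering.at(1));
    //   linearFactorGraph.eliminate(linearIndices, EliminateCholesky);
    // After (unordered GTSAM): elimination consumes Keys directly.
    std::vector<Key> linearIndices;
    linearIndices.push_back(1);
    GaussianFactorGraph result =
        *linearFactorGraph.eliminatePartialMultifrontal(linearIndices, EliminateCholesky).second;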
@@ -23,10 +23,10 @@
 #include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
 #include <gtsam/nonlinear/NonlinearFactorGraph.h>
 #include <gtsam/nonlinear/LinearContainerFactor.h>
-#include <gtsam/nonlinear/Ordering.h>
 #include <gtsam/nonlinear/Values.h>
 #include <gtsam/nonlinear/Symbol.h>
-#include <gtsam/nonlinear/Key.h>
+#include <gtsam/inference/Ordering.h>
+#include <gtsam/inference/Key.h>
 #include <gtsam/inference/JunctionTree.h>
 #include <gtsam/geometry/Pose3.h>
 #include <gtsam/base/TestableAssertions.h>
@@ -104,8 +104,8 @@ TEST( ConcurrentBatchSmoother, getFactors )

   // Add some factors to the smoother
   NonlinearFactorGraph newFactors1;
-  newFactors1.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors1.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors1.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors1.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues1;
   newValues1.insert(1, Pose3());
   newValues1.insert(2, newValues1.at<Pose3>(1).compose(poseOdometry));

@@ -121,8 +121,8 @@ TEST( ConcurrentBatchSmoother, getFactors )

   // Add some more factors to the smoother
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors2.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(3, newValues1.at<Pose3>(2).compose(poseOdometry));
   newValues2.insert(4, newValues2.at<Pose3>(3).compose(poseOdometry));

@@ -154,8 +154,8 @@ TEST( ConcurrentBatchSmoother, getLinearizationPoint )

   // Add some factors to the smoother
   NonlinearFactorGraph newFactors1;
-  newFactors1.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors1.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors1.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors1.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues1;
   newValues1.insert(1, Pose3());
   newValues1.insert(2, newValues1.at<Pose3>(1).compose(poseOdometry));

@@ -171,8 +171,8 @@ TEST( ConcurrentBatchSmoother, getLinearizationPoint )

   // Add some more factors to the smoother
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors2.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors2.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(3, newValues1.at<Pose3>(2).compose(poseOdometry));
   newValues2.insert(4, newValues2.at<Pose3>(3).compose(poseOdometry));

@@ -216,8 +216,8 @@ TEST( ConcurrentBatchSmoother, calculateEstimate )

   // Add some factors to the smoother
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors2.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors2.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors2.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(1, Pose3().compose(poseError));
   newValues2.insert(2, newValues2.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -236,8 +236,8 @@ TEST( ConcurrentBatchSmoother, calculateEstimate )

   // Add some more factors to the smoother
   NonlinearFactorGraph newFactors3;
-  newFactors3.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors3.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues3;
   newValues3.insert(3, newValues2.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   newValues3.insert(4, newValues3.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -302,8 +302,8 @@ TEST( ConcurrentBatchSmoother, update_multiple )

   // Add some factors to the smoother
   NonlinearFactorGraph newFactors2;
-  newFactors2.add(PriorFactor<Pose3>(1, poseInitial, noisePrior));
-  newFactors2.add(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
+  newFactors2.push_back(PriorFactor<Pose3>(1, poseInitial, noisePrior));
+  newFactors2.push_back(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery));
   Values newValues2;
   newValues2.insert(1, Pose3().compose(poseError));
   newValues2.insert(2, newValues2.at<Pose3>(1).compose(poseOdometry).compose(poseError));

@@ -322,8 +322,8 @@ TEST( ConcurrentBatchSmoother, update_multiple )

   // Add some more factors to the smoother
   NonlinearFactorGraph newFactors3;
-  newFactors3.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  newFactors3.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  newFactors3.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   Values newValues3;
   newValues3.insert(3, newValues2.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   newValues3.insert(4, newValues3.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -391,8 +391,8 @@ TEST( ConcurrentBatchSmoother, synchronize_1 )
   Ordering ordering;
   ordering.push_back(1);
   ordering.push_back(2);
-  filterSumarization.add(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
-  filterSumarization.add(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
+  filterSumarization.push_back(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues), filterSeparatorValues));
+  filterSumarization.push_back(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues), filterSeparatorValues));

   // Create expected values: the smoother output will be empty for this case
   NonlinearFactorGraph expectedSmootherSummarization;
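The filter summarization handed to the smoother follows the same pattern: each summarized factor is linearized at the separator values and stored as a LinearContainerFactor, now without an Ordering argument on either linearize or the container. A sketch, assuming poseInitial, poseOdometry, and the noise models as in the tests:

    Values filterSeparatorValues;
    filterSeparatorValues.insert(1, Pose3());
    filterSeparatorValues.insert(2, Pose3());

    NonlinearFactorGraph filterSummarization;
    // linearize(values) returns a GaussianFactor::shared_ptr, wrapped so it
    // can be stored alongside nonlinear factors.
    filterSummarization.push_back(LinearContainerFactor(
        PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues),
        filterSeparatorValues));
    filterSummarization.push_back(LinearContainerFactor(
        BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues),
        filterSeparatorValues));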
@@ -452,10 +452,10 @@ TEST( ConcurrentBatchSmoother, synchronize_2 )
   ordering.push_back(2);
   filterSeparatorValues.insert(1, Pose3().compose(poseError));
   filterSeparatorValues.insert(2, filterSeparatorValues.at<Pose3>(1).compose(poseOdometry).compose(poseError));
-  filterSumarization.add(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
-  filterSumarization.add(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
-  smootherFactors.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  smootherFactors.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  filterSumarization.push_back(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues), filterSeparatorValues));
+  filterSumarization.push_back(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues), filterSeparatorValues));
+  smootherFactors.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  smootherFactors.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
   smootherValues.insert(3, filterSeparatorValues.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   smootherValues.insert(4, smootherValues.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -523,11 +523,11 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
   ordering.push_back(2);
   filterSeparatorValues.insert(1, Pose3().compose(poseError));
   filterSeparatorValues.insert(2, filterSeparatorValues.at<Pose3>(1).compose(poseOdometry).compose(poseError));
-  filterSumarization.add(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
-  filterSumarization.add(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues, ordering), ordering, filterSeparatorValues));
-  smootherFactors.add(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
-  smootherFactors.add(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
-  smootherFactors.add(PriorFactor<Pose3>(4, poseInitial, noisePrior));
+  filterSumarization.push_back(LinearContainerFactor(PriorFactor<Pose3>(1, poseInitial, noisePrior).linearize(filterSeparatorValues), filterSeparatorValues));
+  filterSumarization.push_back(LinearContainerFactor(BetweenFactor<Pose3>(1, 2, poseOdometry, noiseOdometery).linearize(filterSeparatorValues), filterSeparatorValues));
+  smootherFactors.push_back(BetweenFactor<Pose3>(2, 3, poseOdometry, noiseOdometery));
+  smootherFactors.push_back(BetweenFactor<Pose3>(3, 4, poseOdometry, noiseOdometery));
+  smootherFactors.push_back(PriorFactor<Pose3>(4, poseInitial, noisePrior));
   smootherValues.insert(3, filterSeparatorValues.at<Pose3>(2).compose(poseOdometry).compose(poseError));
   smootherValues.insert(4, smootherValues.at<Pose3>(3).compose(poseOdometry).compose(poseError));

@@ -557,19 +557,18 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
   NonlinearFactorGraph allFactors = smootherFactors;
   Values allValues = smoother.getLinearizationPoint();
-  ordering = smoother.getOrdering(); // I'm really hoping this is an acceptable ordering...
-  GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues, ordering);
+  GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);

-  FastSet<Index> eliminateIndices = linearFactors->keys();
+  FastSet<Key> eliminateKeys = linearFactors->keys();
   BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, filterSeparatorValues) {
-    Index index = ordering.at(key_value.key);
-    eliminateIndices.erase(index);
+    eliminateKeys.erase(key_value.key);
   }
-  std::vector<Index> variables(eliminateIndices.begin(), eliminateIndices.end());
-  std::pair<GaussianConditional::shared_ptr, GaussianFactorGraph> result = linearFactors->eliminate(variables, EliminateCholesky);
+  std::vector<Key> variables(eliminateKeys.begin(), eliminateKeys.end());
+  GaussianFactorGraph result = *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second;

   expectedSmootherSummarization.resize(0);
-  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result.second) {
-    expectedSmootherSummarization.add(LinearContainerFactor(factor, ordering, allValues));
+  BOOST_FOREACH(const GaussianFactor::shared_ptr& factor, result) {
+    expectedSmootherSummarization.push_back(LinearContainerFactor(factor, allValues));
   }

   CHECK(assert_equal(expectedSmootherSummarization, actualSmootherSummarization, 1e-6));
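The expected smoother summarization above gives the cleanest before/after contrast of the whole port: the ordered code mapped every separator key through the Ordering to an integer Index before erasing it, while the unordered code works on Keys end to end. A condensed sketch of the new flow, assuming allFactors, allValues, and filterSeparatorValues as in the test:

    // Linearize everything at the smoother's linearization point.
    GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);

    // Start from all keys in the linear graph, then drop the separator keys.
    FastSet<Key> eliminateKeys = linearFactors->keys();
    BOOST_FOREACH(const Values::ConstKeyValuePair& key_value, filterSeparatorValues) {
      eliminateKeys.erase(key_value.key);
    }

    // Eliminating the remainder leaves the summarized factors on the separator.
    std::vector<Key> variables(eliminateKeys.begin(), eliminateKeys.end());
    GaussianFactorGraph summarization =
        *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second;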