From ae63321d1b653bda412fae88833299c6e0d4db9f Mon Sep 17 00:00:00 2001 From: Frank Dellaert Date: Sun, 22 Jan 2023 12:18:09 -0800 Subject: [PATCH] Deprecated boost iterators in Values --- examples/Pose3Localization.cpp | 10 +- examples/Pose3SLAMExample_changeKeys.cpp | 10 +- examples/Pose3SLAMExample_g2o.cpp | 4 +- ...ose3SLAMExample_initializePose3Chordal.cpp | 4 +- ...se3SLAMExample_initializePose3Gradient.cpp | 4 +- examples/SolverComparer.cpp | 6 +- gtsam/hybrid/HybridValues.h | 2 +- gtsam/hybrid/tests/testHybridBayesNet.cpp | 10 +- gtsam/nonlinear/ISAM2-impl.h | 30 ---- gtsam/nonlinear/ISAM2.cpp | 2 +- gtsam/nonlinear/NonlinearFactorGraph.cpp | 4 +- gtsam/nonlinear/Values-inl.h | 16 +- gtsam/nonlinear/Values.cpp | 119 ++++++++++----- gtsam/nonlinear/Values.h | 144 ++++++++++-------- .../internal/LevenbergMarquardtState.h | 7 +- .../tests/testLinearContainerFactor.cpp | 4 +- gtsam/nonlinear/tests/testValues.cpp | 27 +++- gtsam/slam/InitializePose.h | 16 +- gtsam/slam/InitializePose3.cpp | 50 +++--- gtsam/slam/dataset.cpp | 42 ++--- gtsam/slam/tests/testLago.cpp | 14 +- ...ConcurrentFilteringAndSmoothingExample.cpp | 8 +- .../nonlinear/BatchFixedLagSmoother.cpp | 20 +-- .../nonlinear/BatchFixedLagSmoother.h | 4 +- .../nonlinear/ConcurrentBatchFilter.cpp | 23 ++- .../nonlinear/ConcurrentBatchSmoother.cpp | 49 +++--- .../nonlinear/ConcurrentIncrementalFilter.cpp | 12 +- .../ConcurrentIncrementalSmoother.cpp | 23 ++- .../tests/testConcurrentBatchFilter.cpp | 4 +- .../tests/testConcurrentBatchSmoother.cpp | 4 +- .../tests/testConcurrentIncrementalFilter.cpp | 4 +- .../testConcurrentIncrementalSmootherDL.cpp | 8 +- .../testConcurrentIncrementalSmootherGN.cpp | 7 +- 33 files changed, 359 insertions(+), 332 deletions(-) diff --git a/examples/Pose3Localization.cpp b/examples/Pose3Localization.cpp index c18a9e9ce..44076ab38 100644 --- a/examples/Pose3Localization.cpp +++ b/examples/Pose3Localization.cpp @@ -43,9 +43,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } @@ -74,10 +74,8 @@ int main(const int argc, const char* argv[]) { // Calculate and print marginal covariances for all variables Marginals marginals(*graph, result); - for (const auto key_value : result) { - auto p = dynamic_cast*>(&key_value.value); - if (!p) continue; - std::cout << marginals.marginalCovariance(key_value.key) << endl; + for (const auto& key_pose : result.extract()) { + std::cout << marginals.marginalCovariance(key_pose.first) << endl; } return 0; } diff --git a/examples/Pose3SLAMExample_changeKeys.cpp b/examples/Pose3SLAMExample_changeKeys.cpp index 9aa697f14..7da83d211 100644 --- a/examples/Pose3SLAMExample_changeKeys.cpp +++ b/examples/Pose3SLAMExample_changeKeys.cpp @@ -55,14 +55,14 @@ int main(const int argc, const char *argv[]) { std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl; // Additional: rewrite input with simplified keys 0,1,... 
Values simpleInitial; - for(const auto key_value: *initial) { + for (const auto k : initial->keys()) { Key key; - if(add) - key = key_value.key + firstKey; + if (add) + key = k + firstKey; else - key = key_value.key - firstKey; + key = k - firstKey; - simpleInitial.insert(key, initial->at(key_value.key)); + simpleInitial.insert(key, initial->at(k)); } NonlinearFactorGraph simpleGraph; for(const boost::shared_ptr& factor: *graph) { diff --git a/examples/Pose3SLAMExample_g2o.cpp b/examples/Pose3SLAMExample_g2o.cpp index 367964307..7ae2978ce 100644 --- a/examples/Pose3SLAMExample_g2o.cpp +++ b/examples/Pose3SLAMExample_g2o.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Chordal.cpp b/examples/Pose3SLAMExample_initializePose3Chordal.cpp index 992750fed..03db9fc77 100644 --- a/examples/Pose3SLAMExample_initializePose3Chordal.cpp +++ b/examples/Pose3SLAMExample_initializePose3Chordal.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/Pose3SLAMExample_initializePose3Gradient.cpp b/examples/Pose3SLAMExample_initializePose3Gradient.cpp index 384f290a1..31693c5ff 100644 --- a/examples/Pose3SLAMExample_initializePose3Gradient.cpp +++ b/examples/Pose3SLAMExample_initializePose3Gradient.cpp @@ -42,9 +42,9 @@ int main(const int argc, const char* argv[]) { auto priorModel = noiseModel::Diagonal::Variances( (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished()); Key firstKey = 0; - for (const auto key_value : *initial) { + for (const auto key : initial->keys()) { std::cout << "Adding prior to g2o file " << std::endl; - firstKey = key_value.key; + firstKey = key; graph->addPrior(firstKey, Pose3(), priorModel); break; } diff --git a/examples/SolverComparer.cpp b/examples/SolverComparer.cpp index 69975b620..3fffc31d0 100644 --- a/examples/SolverComparer.cpp +++ b/examples/SolverComparer.cpp @@ -559,12 +559,12 @@ void runPerturb() // Perturb values VectorValues noise; - for(const Values::KeyValuePair key_val: initial) + for(const auto& key_dim: initial.dims()) { - Vector noisev(key_val.value.dim()); + Vector noisev(key_dim.second); for(Vector::Index i = 0; i < noisev.size(); ++i) noisev(i) = normal(rng); - noise.insert(key_val.key, noisev); + noise.insert(key_dim.first, noisev); } Values perturbed = initial.retract(noise); diff --git a/gtsam/hybrid/HybridValues.h b/gtsam/hybrid/HybridValues.h index 005e3534b..94365db75 100644 --- a/gtsam/hybrid/HybridValues.h +++ b/gtsam/hybrid/HybridValues.h @@ -102,7 +102,7 @@ class GTSAM_EXPORT HybridValues { /// Check whether a variable with key \c j exists in values. 
bool existsNonlinear(Key j) { - return (nonlinear_.find(j) != nonlinear_.end()); + return nonlinear_.exists(j); }; /// Check whether a variable with key \c j exists. diff --git a/gtsam/hybrid/tests/testHybridBayesNet.cpp b/gtsam/hybrid/tests/testHybridBayesNet.cpp index 068a2031c..425e93415 100644 --- a/gtsam/hybrid/tests/testHybridBayesNet.cpp +++ b/gtsam/hybrid/tests/testHybridBayesNet.cpp @@ -129,10 +129,7 @@ TEST(HybridBayesNet, evaluateHybrid) { TEST(HybridBayesNet, Choose) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; @@ -163,10 +160,7 @@ TEST(HybridBayesNet, Choose) { TEST(HybridBayesNet, OptimizeAssignment) { Switching s(4); - Ordering ordering; - for (auto&& kvp : s.linearizationPoint) { - ordering += kvp.key; - } + const Ordering ordering(s.linearizationPoint.keys()); HybridBayesNet::shared_ptr hybridBayesNet; HybridGaussianFactorGraph::shared_ptr remainingFactorGraph; diff --git a/gtsam/nonlinear/ISAM2-impl.h b/gtsam/nonlinear/ISAM2-impl.h index eb2285b28..7b7630582 100644 --- a/gtsam/nonlinear/ISAM2-impl.h +++ b/gtsam/nonlinear/ISAM2-impl.h @@ -436,36 +436,6 @@ struct GTSAM_EXPORT UpdateImpl { } } - /** - * Apply expmap to the given values, but only for indices appearing in - * \c mask. Values are expmapped in-place. - * \param mask Mask on linear indices, only \c true entries are expmapped - */ - static void ExpmapMasked(const VectorValues& delta, const KeySet& mask, - Values* theta) { - gttic(ExpmapMasked); - assert(theta->size() == delta.size()); - Values::iterator key_value; - VectorValues::const_iterator key_delta; -#ifdef GTSAM_USE_TBB - for (key_value = theta->begin(); key_value != theta->end(); ++key_value) { - key_delta = delta.find(key_value->key); -#else - for (key_value = theta->begin(), key_delta = delta.begin(); - key_value != theta->end(); ++key_value, ++key_delta) { - assert(key_value->key == key_delta->first); -#endif - Key var = key_value->key; - assert(static_cast(delta[var].size()) == key_value->value.dim()); - assert(delta[var].allFinite()); - if (mask.exists(var)) { - Value* retracted = key_value->value.retract_(delta[var]); - key_value->value = *retracted; - retracted->deallocate_(); - } - } - } - // Linearize new factors void linearizeNewFactors(const NonlinearFactorGraph& newFactors, const Values& theta, size_t numNonlinearFactors, diff --git a/gtsam/nonlinear/ISAM2.cpp b/gtsam/nonlinear/ISAM2.cpp index f56b23777..1c15469cc 100644 --- a/gtsam/nonlinear/ISAM2.cpp +++ b/gtsam/nonlinear/ISAM2.cpp @@ -454,7 +454,7 @@ ISAM2Result ISAM2::update(const NonlinearFactorGraph& newFactors, update.findFluid(roots_, relinKeys, &result.markedKeys, result.details()); // 6. Update linearization point for marked variables: // \Theta_{J}:=\Theta_{J}+\Delta_{J}. 
- UpdateImpl::ExpmapMasked(delta_, relinKeys, &theta_); + theta_.retractMasked(delta_, relinKeys); } result.variablesRelinearized = result.markedKeys.size(); } diff --git a/gtsam/nonlinear/NonlinearFactorGraph.cpp b/gtsam/nonlinear/NonlinearFactorGraph.cpp index dfa54f26f..84c15c004 100644 --- a/gtsam/nonlinear/NonlinearFactorGraph.cpp +++ b/gtsam/nonlinear/NonlinearFactorGraph.cpp @@ -285,8 +285,8 @@ static Scatter scatterFromValues(const Values& values) { scatter.reserve(values.size()); // use "natural" ordering with keys taken from the initial values - for (const auto key_value : values) { - scatter.add(key_value.key, key_value.value.dim()); + for (const auto& key_dim : values.dims()) { + scatter.add(key_dim.first, key_dim.second); } return scatter; diff --git a/gtsam/nonlinear/Values-inl.h b/gtsam/nonlinear/Values-inl.h index 598963761..474394f7b 100644 --- a/gtsam/nonlinear/Values-inl.h +++ b/gtsam/nonlinear/Values-inl.h @@ -24,12 +24,10 @@ #pragma once +#include + #include -#include - -#include // Only so Eclipse finds class definition - namespace gtsam { @@ -95,13 +93,13 @@ namespace gtsam { std::map Values::extract(const std::function& filterFcn) const { std::map result; - for (const auto& key_value : *this) { + for (const auto& key_value : values_) { // Check if key matches - if (filterFcn(key_value.key)) { + if (filterFcn(key_value.first)) { // Check if type matches (typically does as symbols matched with types) if (auto t = - dynamic_cast*>(&key_value.value)) - result[key_value.key] = t->value(); + dynamic_cast*>(key_value.second)) + result[key_value.first] = t->value(); } } return result; @@ -109,6 +107,8 @@ namespace gtsam { /* ************************************************************************* */ #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include + template class Values::Filtered { public: diff --git a/gtsam/nonlinear/Values.cpp b/gtsam/nonlinear/Values.cpp index adadc99c0..e3adcc1bf 100644 --- a/gtsam/nonlinear/Values.cpp +++ b/gtsam/nonlinear/Values.cpp @@ -25,8 +25,6 @@ #include #include -#include - #include #include #include @@ -52,15 +50,15 @@ namespace gtsam { /* ************************************************************************* */ Values::Values(const Values& other, const VectorValues& delta) { - for (const_iterator key_value = other.begin(); key_value != other.end(); ++key_value) { - VectorValues::const_iterator it = delta.find(key_value->key); - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument + for (const auto& key_value : other.values_) { + VectorValues::const_iterator it = delta.find(key_value.first); + Key key = key_value.first; // Non-const duplicate to deal with non-const insert argument if (it != delta.end()) { const Vector& v = it->second; - Value* retractedValue(key_value->value.retract_(v)); // Retract + Value* retractedValue(key_value.second->retract_(v)); // Retract values_.insert(key, retractedValue); // Add retracted result directly to result values } else { - values_.insert(key, key_value->value.clone_()); // Add original version to result values + values_.insert(key, key_value.second->clone_()); // Add original version to result values } } } @@ -69,9 +67,9 @@ namespace gtsam { void Values::print(const string& str, const KeyFormatter& keyFormatter) const { cout << str << (str.empty() ? 
"" : "\n"); cout << "Values with " << size() << " values:\n"; - for(const_iterator key_value = begin(); key_value != end(); ++key_value) { - cout << "Value " << keyFormatter(key_value->key) << ": "; - key_value->value.print(""); + for (const auto& key_value : values_) { + cout << "Value " << keyFormatter(key_value.first) << ": "; + key_value.second->print(""); cout << "\n"; } } @@ -80,12 +78,12 @@ namespace gtsam { bool Values::equals(const Values& other, double tol) const { if (this->size() != other.size()) return false; - for (const_iterator it1 = this->begin(), it2 = other.begin(); - it1 != this->end(); ++it1, ++it2) { - const Value& value1 = it1->value; - const Value& value2 = it2->value; - if (typeid(value1) != typeid(value2) || it1->key != it2->key - || !value1.equals_(value2, tol)) { + for (auto it1 = values_.begin(), it2 = other.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + const Value* value1 = it1->second; + const Value* value2 = it2->second; + if (typeid(*value1) != typeid(*value2) || it1->first != it2->first + || !value1->equals_(*value2, tol)) { return false; } } @@ -102,17 +100,44 @@ namespace gtsam { return Values(*this, delta); } + /* ************************************************************************* */ + void Values::retractMasked(const VectorValues& delta, const KeySet& mask) { + gttic(retractMasked); + assert(theta->size() == delta.size()); + auto key_value = values_.begin(); + VectorValues::const_iterator key_delta; +#ifdef GTSAM_USE_TBB + for (; key_value != values_.end(); ++key_value) { + key_delta = delta.find(key_value.first); +#else + for (key_delta = delta.begin(); key_value != values_.end(); + ++key_value, ++key_delta) { + assert(key_value.first == key_delta->first); +#endif + Key var = key_value->first; + assert(static_cast(delta[var].size()) == key_value->second->dim()); + assert(delta[var].allFinite()); + if (mask.exists(var)) { + Value* retracted = key_value->second->retract_(delta[var]); + // TODO(dellaert): can we use std::move here? + *(key_value->second) = *retracted; + retracted->deallocate_(); + } + } + } + /* ************************************************************************* */ VectorValues Values::localCoordinates(const Values& cp) const { if(this->size() != cp.size()) throw DynamicValuesMismatched(); VectorValues result; - for(const_iterator it1=this->begin(), it2=cp.begin(); it1!=this->end(); ++it1, ++it2) { - if(it1->key != it2->key) + for (auto it1 = values_.begin(), it2 = cp.values_.begin(); + it1 != values_.end(); ++it1, ++it2) { + if(it1->first != it2->first) throw DynamicValuesMismatched(); // If keys do not match // Will throw a dynamic_cast exception if types do not match // NOTE: this is separate from localCoordinates(cp, ordering, result) due to at() vs. 
insert - result.insert(it1->key, it1->value.localCoordinates_(it2->value)); + result.insert(it1->first, it1->second->localCoordinates_(*it2->second)); } return result; } @@ -130,24 +155,26 @@ namespace gtsam { /* ************************************************************************* */ void Values::insert(Key j, const Value& val) { - std::pair insertResult = tryInsert(j, val); + auto insertResult = values_.insert(j, val.clone_()); if(!insertResult.second) throw ValuesKeyAlreadyExists(j); } /* ************************************************************************* */ - void Values::insert(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - Key key = key_value->key; // Non-const duplicate to deal with non-const insert argument - insert(key, key_value->value); + void Values::insert(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + insert(key_value->first, *(key_value->second)); } } /* ************************************************************************* */ +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 std::pair Values::tryInsert(Key j, const Value& value) { std::pair result = values_.insert(j, value.clone_()); return std::make_pair(boost::make_transform_iterator(result.first, &make_deref_pair), result.second); } +#endif /* ************************************************************************* */ void Values::update(Key j, const Value& val) { @@ -165,9 +192,10 @@ namespace gtsam { } /* ************************************************************************* */ - void Values::update(const Values& values) { - for(const_iterator key_value = values.begin(); key_value != values.end(); ++key_value) { - this->update(key_value->key, key_value->value); + void Values::update(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->update(key_value->first, *(key_value->second)); } } @@ -183,10 +211,10 @@ namespace gtsam { } /* ************************************************************************ */ - void Values::insert_or_assign(const Values& values) { - for (const_iterator key_value = values.begin(); key_value != values.end(); - ++key_value) { - this->insert_or_assign(key_value->key, key_value->value); + void Values::insert_or_assign(const Values& other) { + for (auto key_value = other.values_.begin(); + key_value != other.values_.end(); ++key_value) { + this->insert_or_assign(key_value->first, *(key_value->second)); } } @@ -202,8 +230,16 @@ namespace gtsam { KeyVector Values::keys() const { KeyVector result; result.reserve(size()); - for(const_iterator key_value = begin(); key_value != end(); ++key_value) - result.push_back(key_value->key); + for(const auto& key_value: values_) + result.push_back(key_value.first); + return result; + } + + /* ************************************************************************* */ + KeySet Values::keySet() const { + KeySet result; + for(const auto& key_value: values_) + result.insert(key_value.first); return result; } @@ -217,8 +253,17 @@ namespace gtsam { /* ************************************************************************* */ size_t Values::dim() const { size_t result = 0; - for(const auto key_value: *this) { - result += key_value.value.dim(); + for (const auto key_value : values_) { + result += key_value->second->dim(); + } + return result; + } + + /* ************************************************************************* */ + std::map 
Values::dims() const { + std::map result; + for (const auto key_value : values_) { + result.emplace(key_value->first, key_value->second->dim()); } return result; } @@ -226,8 +271,8 @@ namespace gtsam { /* ************************************************************************* */ VectorValues Values::zeroVectors() const { VectorValues result; - for(const auto key_value: *this) - result.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for (const auto key_value : values_) + result.insert(key_value->first, Vector::Zero(key_value->second->dim())); return result; } diff --git a/gtsam/nonlinear/Values.h b/gtsam/nonlinear/Values.h index 79ddb0267..299435677 100644 --- a/gtsam/nonlinear/Values.h +++ b/gtsam/nonlinear/Values.h @@ -28,10 +28,10 @@ #include #include #include -#include #include #include #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 +#include #include #endif @@ -81,10 +81,6 @@ namespace gtsam { // The member to store the values, see just above KeyValueMap values_; - // Types obtained by iterating - typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; - typedef KeyValueMap::iterator::value_type KeyValuePtrPair; - public: /// A shared_ptr to this class @@ -110,22 +106,6 @@ namespace gtsam { ConstKeyValuePair(const KeyValuePair& kv) : key(kv.key), value(kv.value) {} }; - /// Mutable forward iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::iterator> iterator; - - /// Const forward iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_iterator> const_iterator; - - /// Mutable reverse iterator, with value type KeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::reverse_iterator> reverse_iterator; - - /// Const reverse iterator, with value type ConstKeyValuePair - typedef boost::transform_iterator< - std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; - typedef KeyValuePair value_type; /** Default constructor creates an empty Values class */ @@ -191,47 +171,24 @@ namespace gtsam { template boost::optional exists(Key j) const; - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } - - /** Find an element by key, returning an iterator, or end() if the key was - * not found. */ - const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } - - /** Find the element greater than or equal to the specified key. */ - const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. */ - iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } - - /** Find the lowest-ordered element greater than the specified key. 
*/ - const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); } - /** The number of variables in this config */ size_t size() const { return values_.size(); } /** whether the config is empty */ bool empty() const { return values_.empty(); } - const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } - const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } - iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } - iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } - const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } - const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } - reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } - reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } - /// @name Manifold Operations /// @{ /** Add a delta config to current config and returns a new config */ Values retract(const VectorValues& delta) const; + /** + * Retract, but only for Keys appearing in \c mask. In-place. + * \param mask Mask on Keys where to apply retract. + */ + void retractMasked(const VectorValues& delta, const KeySet& mask); + /** Get a delta config about a linearization point c0 (*this) */ VectorValues localCoordinates(const Values& cp) const; @@ -252,12 +209,6 @@ namespace gtsam { /// version for double void insertDouble(Key j, double c) { insert(j,c); } - /** insert that mimics the STL map insert - if the value already exists, the map is not modified - * and an iterator to the existing value is returned, along with 'false'. If the value did not - * exist, it is inserted and an iterator pointing to the new element, along with 'true', is - * returned. */ - std::pair tryInsert(Key j, const Value& value); - /** single element change of existing element */ void update(Key j, const Value& val); @@ -288,11 +239,16 @@ namespace gtsam { void erase(Key j); /** - * Returns a set of keys in the config + * Returns a vector of keys in the config. * Note: by construction, the list is ordered */ KeyVector keys() const; + /** + * Returns a set of keys in the config. 
+ */ + KeySet keySet() const; + /** Replace all keys and variables */ Values& operator=(const Values& rhs); @@ -305,6 +261,9 @@ namespace gtsam { /** Compute the total dimensionality of all values (\f$ O(n) \f$) */ size_t dim() const; + /** Return all dimensions in a map (\f$ O(n log n) \f$) */ + std::map dims() const; + /** Return a VectorValues of zero vectors for each variable in this Values */ VectorValues zeroVectors() const; @@ -312,8 +271,8 @@ namespace gtsam { template size_t count() const { size_t i = 0; - for (const auto key_value : *this) { - if (dynamic_cast*>(&key_value.value)) + for (const auto key_value : values_) { + if (dynamic_cast*>(key_value.second)) ++i; } return i; @@ -343,6 +302,67 @@ namespace gtsam { extract(const std::function& filterFcn = &_truePredicate) const; #ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 + // Types obtained by iterating + typedef KeyValueMap::const_iterator::value_type ConstKeyValuePtrPair; + typedef KeyValueMap::iterator::value_type KeyValuePtrPair; + + /** insert that mimics the STL map insert - if the value already exists, the map is not modified + * and an iterator to the existing value is returned, along with 'false'. If the value did not + * exist, it is inserted and an iterator pointing to the new element, along with 'true', is + * returned. */ + std::pair tryInsert(Key j, const Value& value); + + /// Mutable forward iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::iterator> iterator; + + /// Const forward iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_iterator> const_iterator; + + /// Mutable reverse iterator, with value type KeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::reverse_iterator> reverse_iterator; + + /// Const reverse iterator, with value type ConstKeyValuePair + typedef boost::transform_iterator< + std::function, KeyValueMap::const_reverse_iterator> const_reverse_iterator; + + static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { + return ConstKeyValuePair(key_value.first, *key_value.second); } + + static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { + return KeyValuePair(key_value.first, *key_value.second); } + + const_iterator begin() const { return boost::make_transform_iterator(values_.begin(), &make_const_deref_pair); } + const_iterator end() const { return boost::make_transform_iterator(values_.end(), &make_const_deref_pair); } + iterator begin() { return boost::make_transform_iterator(values_.begin(), &make_deref_pair); } + iterator end() { return boost::make_transform_iterator(values_.end(), &make_deref_pair); } + const_reverse_iterator rbegin() const { return boost::make_transform_iterator(values_.rbegin(), &make_const_deref_pair); } + const_reverse_iterator rend() const { return boost::make_transform_iterator(values_.rend(), &make_const_deref_pair); } + reverse_iterator rbegin() { return boost::make_transform_iterator(values_.rbegin(), &make_deref_pair); } + reverse_iterator rend() { return boost::make_transform_iterator(values_.rend(), &make_deref_pair); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. */ + iterator find(Key j) { return boost::make_transform_iterator(values_.find(j), &make_deref_pair); } + + /** Find an element by key, returning an iterator, or end() if the key was + * not found. 
*/ + const_iterator find(Key j) const { return boost::make_transform_iterator(values_.find(j), &make_const_deref_pair); } + + /** Find the element greater than or equal to the specified key. */ + iterator lower_bound(Key j) { return boost::make_transform_iterator(values_.lower_bound(j), &make_deref_pair); } + + /** Find the element greater than or equal to the specified key. */ + const_iterator lower_bound(Key j) const { return boost::make_transform_iterator(values_.lower_bound(j), &make_const_deref_pair); } + + /** Find the lowest-ordered element greater than the specified key. */ + iterator upper_bound(Key j) { return boost::make_transform_iterator(values_.upper_bound(j), &make_deref_pair); } + + /** Find the lowest-ordered element greater than the specified key. */ + const_iterator upper_bound(Key j) const { return boost::make_transform_iterator(values_.upper_bound(j), &make_const_deref_pair); } + /** A filtered view of a Values, returned from Values::filter. */ template class Filtered; @@ -395,12 +415,6 @@ namespace gtsam { ar & BOOST_SERIALIZATION_NVP(values_); } - static ConstKeyValuePair make_const_deref_pair(const KeyValueMap::const_iterator::value_type& key_value) { - return ConstKeyValuePair(key_value.first, *key_value.second); } - - static KeyValuePair make_deref_pair(const KeyValueMap::iterator::value_type& key_value) { - return KeyValuePair(key_value.first, *key_value.second); } - }; /* ************************************************************************* */ diff --git a/gtsam/nonlinear/internal/LevenbergMarquardtState.h b/gtsam/nonlinear/internal/LevenbergMarquardtState.h index 75e5a5135..a055f3060 100644 --- a/gtsam/nonlinear/internal/LevenbergMarquardtState.h +++ b/gtsam/nonlinear/internal/LevenbergMarquardtState.h @@ -128,9 +128,10 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState { noiseModelCache.resize(0); // for each of the variables, add a prior damped.reserve(damped.size() + values.size()); - for (const auto key_value : values) { - const Key key = key_value.key; - const size_t dim = key_value.value.dim(); + std::map dims = values.dims(); + for (const auto& key_dim : dims) { + const Key& key = key_dim.first; + const size_t& dim = key_dim.second; const CachedModel* item = getCachedModel(dim); damped += boost::make_shared(key, item->A, item->b, item->model); } diff --git a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp index 22ae4d73e..9bd9ace98 100644 --- a/gtsam/nonlinear/tests/testLinearContainerFactor.cpp +++ b/gtsam/nonlinear/tests/testLinearContainerFactor.cpp @@ -350,8 +350,8 @@ TEST(TestLinearContainerFactor, Rekey) { // For extra fun lets try linearizing this LCF gtsam::Values linearization_pt_rekeyed; - for (auto key_val : linearization_pt) { - linearization_pt_rekeyed.insert(key_map.at(key_val.key), key_val.value); + for (auto key : linearization_pt.keys()) { + linearization_pt_rekeyed.insert(key_map.at(key), linearization_pt.at(key)); } // Check independent values since we don't want to unnecessarily sort diff --git a/gtsam/nonlinear/tests/testValues.cpp b/gtsam/nonlinear/tests/testValues.cpp index 85dd2f4b3..644b8c84f 100644 --- a/gtsam/nonlinear/tests/testValues.cpp +++ b/gtsam/nonlinear/tests/testValues.cpp @@ -195,6 +195,7 @@ TEST(Values, basic_functions) values.insert(6, M1); values.insert(8, M2); +#ifdef GTSAM_ALLOW_DEPRECATED_SINCE_V42 // find EXPECT_LONGS_EQUAL(4, values.find(4)->key); EXPECT_LONGS_EQUAL(4, values_c.find(4)->key); @@ -210,7 +211,7 @@ TEST(Values, 
basic_functions) EXPECT_LONGS_EQUAL(6, values_c.upper_bound(4)->key); EXPECT_LONGS_EQUAL(4, values.upper_bound(3)->key); EXPECT_LONGS_EQUAL(4, values_c.upper_bound(3)->key); - +#endif } /* ************************************************************************* */ @@ -220,8 +221,8 @@ TEST(Values, retract_full) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta {{key1, Vector3(1.0, 1.1, 1.2)}, - {key2, Vector3(1.3, 1.4, 1.5)}}; + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(2.0, 3.1, 4.2)); @@ -238,7 +239,7 @@ TEST(Values, retract_partial) config0.insert(key1, Vector3(1.0, 2.0, 3.0)); config0.insert(key2, Vector3(5.0, 6.0, 7.0)); - VectorValues delta {{key2, Vector3(1.3, 1.4, 1.5)}}; + const VectorValues delta{{key2, Vector3(1.3, 1.4, 1.5)}}; Values expected; expected.insert(key1, Vector3(1.0, 2.0, 3.0)); @@ -248,6 +249,24 @@ TEST(Values, retract_partial) CHECK(assert_equal(expected, Values(config0, delta))); } +/* ************************************************************************* */ +TEST(Values, retract_masked) +{ + Values config0; + config0.insert(key1, Vector3(1.0, 2.0, 3.0)); + config0.insert(key2, Vector3(5.0, 6.0, 7.0)); + + const VectorValues delta{{key1, Vector3(1.0, 1.1, 1.2)}, + {key2, Vector3(1.3, 1.4, 1.5)}}; + + Values expected; + expected.insert(key1, Vector3(1.0, 2.0, 3.0)); + expected.insert(key2, Vector3(6.3, 7.4, 8.5)); + + config0.retractMasked(delta, {key2}); + CHECK(assert_equal(expected, config0)); +} + /* ************************************************************************* */ TEST(Values, equals) { diff --git a/gtsam/slam/InitializePose.h b/gtsam/slam/InitializePose.h index f9b99e47e..79e3fe813 100644 --- a/gtsam/slam/InitializePose.h +++ b/gtsam/slam/InitializePose.h @@ -62,10 +62,10 @@ static Values computePoses(const Values& initialRot, // Upgrade rotations to full poses Values initialPose; - for (const auto key_value : initialRot) { - Key key = key_value.key; - const auto& rot = initialRot.at(key); - Pose initializedPose = Pose(rot, origin); + for (const auto& key_rot : initialRot.extract()) { + const Key& key = key_rot.first; + const auto& rot = key_rot.second; + const Pose initializedPose(rot, origin); initialPose.insert(key, initializedPose); } @@ -82,14 +82,14 @@ static Values computePoses(const Values& initialRot, params.setVerbosity("TERMINATION"); } GaussNewtonOptimizer optimizer(*posegraph, initialPose, params); - Values GNresult = optimizer.optimize(); + const Values GNresult = optimizer.optimize(); // put into Values structure Values estimate; - for (const auto key_value : GNresult) { - Key key = key_value.key; + for (const auto& key_pose : GNresult.extract()) { + const Key& key = key_pose.first; if (key != kAnchorKey) { - const Pose& pose = GNresult.at(key); + const Pose& pose = key_pose.second; estimate.insert(key, pose); } } diff --git a/gtsam/slam/InitializePose3.cpp b/gtsam/slam/InitializePose3.cpp index 6bb1b0f36..e8ec9181c 100644 --- a/gtsam/slam/InitializePose3.cpp +++ b/gtsam/slam/InitializePose3.cpp @@ -122,12 +122,12 @@ Values InitializePose3::computeOrientationsGradient( gttic(InitializePose3_computeOrientationsGradient); // this works on the inverse rotations, according to Tron&Vidal,2011 - Values inverseRot; - inverseRot.insert(initialize::kAnchorKey, Rot3()); - for(const auto key_value: givenGuess) { - Key key = key_value.key; - const Pose3& pose = givenGuess.at(key); - 
inverseRot.insert(key, pose.rotation().inverse()); + std::map inverseRot; + inverseRot.emplace(initialize::kAnchorKey, Rot3()); + for(const auto& key_pose: givenGuess.extract()) { + const Key& key = key_pose.first; + const Pose3& pose = key_pose.second; + inverseRot.emplace(key, pose.rotation().inverse()); } // Create the map of edges incident on each node @@ -138,10 +138,8 @@ Values InitializePose3::computeOrientationsGradient( // calculate max node degree & allocate gradient size_t maxNodeDeg = 0; - VectorValues grad; - for(const auto key_value: inverseRot) { - Key key = key_value.key; - grad.insert(key,Z_3x1); + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; size_t currNodeDeg = (adjEdgesMap.at(key)).size(); if(currNodeDeg > maxNodeDeg) maxNodeDeg = currNodeDeg; @@ -162,28 +160,29 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // compute the gradient at each node maxGrad = 0; - for (const auto key_value : inverseRot) { - Key key = key_value.key; + VectorValues grad; + for (const auto& key_R : inverseRot) { + const Key& key = key_R.first; + const Rot3& Ri = key_R.second; Vector gradKey = Z_3x1; // collect the gradient for each edge incident on key for (const size_t& factorId : adjEdgesMap.at(key)) { - Rot3 Rij = factorId2RotMap.at(factorId); - Rot3 Ri = inverseRot.at(key); + const Rot3& Rij = factorId2RotMap.at(factorId); auto factor = pose3Graph.at(factorId); const auto& keys = factor->keys(); if (key == keys[0]) { Key key1 = keys[1]; - Rot3 Rj = inverseRot.at(key1); + const Rot3& Rj = inverseRot.at(key1); gradKey = gradKey + gradientTron(Ri, Rij * Rj, a, b); } else if (key == keys[1]) { Key key0 = keys[0]; - Rot3 Rj = inverseRot.at(key0); + const Rot3& Rj = inverseRot.at(key0); gradKey = gradKey + gradientTron(Ri, Rij.between(Rj), a, b); } else { cout << "Error in gradient computation" << endl; } } // end of i-th gradient computation - grad.at(key) = stepsize * gradKey; + grad.insert(key, stepsize * gradKey); double normGradKey = (gradKey).norm(); if(normGradKey>maxGrad) @@ -192,8 +191,12 @@ Values InitializePose3::computeOrientationsGradient( ////////////////////////////////////////////////////////////////////////// // update estimates - inverseRot = inverseRot.retract(grad); - + for (auto& key_R : inverseRot) { + const Key& key = key_R.first; + Rot3& Ri = key_R.second; + Ri = Ri.retract(grad.at(key)); + } + ////////////////////////////////////////////////////////////////////////// // check stopping condition if (it>20 && maxGrad < 5e-3) @@ -201,12 +204,13 @@ Values InitializePose3::computeOrientationsGradient( } // enf of gradient iterations // Return correct rotations - const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior + const Rot3& Rref = inverseRot.at(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior Values estimateRot; - for(const auto key_value: inverseRot) { - Key key = key_value.key; + for (const auto key_R : inverseRot) { + const Key& key = key_R.first; + const Rot3& Ri = key_R.second; if (key != initialize::kAnchorKey) { - const Rot3& R = inverseRot.at(key); + const Rot3& R = inverseRot.at(key); if(setRefFrame) estimateRot.insert(key, Rref.compose(R.inverse())); else diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp index db79fcd3c..9cb12aefb 100644 --- a/gtsam/slam/dataset.cpp +++ b/gtsam/slam/dataset.cpp @@ -586,9 +586,9 @@ void save2D(const 
NonlinearFactorGraph &graph, const Values &config, fstream stream(filename.c_str(), fstream::out); // save poses - for (const auto key_value : config) { - const Pose2 &pose = key_value.value.cast(); - stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y() + for (const auto &key_pose : config.extract()) { + const Pose2 &pose = key_pose.second; + stream << "VERTEX2 " << key_pose.first << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } @@ -635,45 +635,33 @@ void writeG2o(const NonlinearFactorGraph &graph, const Values &estimate, auto index = [](gtsam::Key key) { return Symbol(key).index(); }; // save 2D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose2 &pose = p->value(); - stream << "VERTEX_SE2 " << index(key_value.key) << " " << pose.x() << " " + for (const auto &pair : estimate.extract()) { + const Pose2 &pose = pair.second; + stream << "VERTEX_SE2 " << index(pair.first) << " " << pose.x() << " " << pose.y() << " " << pose.theta() << endl; } // save 3D poses - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Pose3 &pose = p->value(); + for (const auto &pair : estimate.extract()) { + const Pose3 &pose = pair.second; const Point3 t = pose.translation(); const auto q = pose.rotation().toQuaternion(); - stream << "VERTEX_SE3:QUAT " << index(key_value.key) << " " << t.x() << " " + stream << "VERTEX_SE3:QUAT " << index(pair.first) << " " << t.x() << " " << t.y() << " " << t.z() << " " << q.x() << " " << q.y() << " " << q.z() << " " << q.w() << endl; } // save 2D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point2 &point = p->value(); - stream << "VERTEX_XY " << index(key_value.key) << " " << point.x() << " " + for (const auto &pair : estimate.extract()) { + const Point2 &point = pair.second; + stream << "VERTEX_XY " << index(pair.first) << " " << point.x() << " " << point.y() << endl; } // save 3D landmarks - for (const auto key_value : estimate) { - auto p = dynamic_cast *>(&key_value.value); - if (!p) - continue; - const Point3 &point = p->value(); - stream << "VERTEX_TRACKXYZ " << index(key_value.key) << " " << point.x() + for (const auto &pair : estimate.extract()) { + const Point3 &point = pair.second; + stream << "VERTEX_TRACKXYZ " << index(pair.first) << " " << point.x() << " " << point.y() << " " << point.z() << endl; } diff --git a/gtsam/slam/tests/testLago.cpp b/gtsam/slam/tests/testLago.cpp index 0b74f06d7..5d429796d 100644 --- a/gtsam/slam/tests/testLago.cpp +++ b/gtsam/slam/tests/testLago.cpp @@ -283,9 +283,10 @@ TEST( Lago, largeGraphNoisy_orientations ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-5)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + EXPECT(assert_equal(pose, actual.at(k), 1e-5)); } } @@ -309,9 +310,10 @@ TEST( Lago, largeGraphNoisy ) { Values::shared_ptr expected; boost::tie(gmatlab, expected) = readG2o(matlabFile); - for(const auto key_val: *expected){ - Key k = key_val.key; - EXPECT(assert_equal(expected->at(k), actual.at(k), 1e-2)); + for(const auto key_pose: expected->extract()){ + const Key& k = key_pose.first; + const Pose2& pose = key_pose.second; + 
EXPECT(assert_equal(pose, actual.at(k), 1e-2)); } } diff --git a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp index 1494d784b..52a45b6d0 100644 --- a/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp +++ b/gtsam_unstable/examples/ConcurrentFilteringAndSmoothingExample.cpp @@ -308,12 +308,12 @@ int main(int argc, char** argv) { // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds cout << "After 15.0 seconds, each version contains to the following keys:" << endl; cout << " Concurrent Filter Keys: " << endl; - for(const auto key_value: concurrentFilter.getLinearizationPoint()) { - cout << setprecision(5) << " Key: " << key_value.key << endl; + for(const auto key: concurrentFilter.getLinearizationPoint().keys()) { + cout << setprecision(5) << " Key: " << key << endl; } cout << " Concurrent Smoother Keys: " << endl; - for(const auto key_value: concurrentSmoother.getLinearizationPoint()) { - cout << setprecision(5) << " Key: " << key_value.key << endl; + for(const auto key: concurrentSmoother.getLinearizationPoint().keys()) { + cout << setprecision(5) << " Key: " << key << endl; } cout << " Fixed-Lag Smoother Keys: " << endl; for(const auto& key_timestamp: fixedlagSmoother.timestamps()) { diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp index 18c664934..f5280ceff 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp +++ b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.cpp @@ -59,8 +59,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update( // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for (const auto key_value : newTheta) { - ordering_.push_back(key_value.key); + for (const auto key : newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta delta_.insert(newTheta.zeroVectors()); @@ -161,8 +161,8 @@ void BatchFixedLagSmoother::eraseKeys(const KeyVector& keys) { factorIndex_.erase(key); // Erase the key from the set of linearized keys - if (linearKeys_.exists(key)) { - linearKeys_.erase(key); + if (linearValues_.exists(key)) { + linearValues_.erase(key); } } @@ -186,8 +186,8 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Create output result structure Result result; - result.nonlinearVariables = theta_.size() - linearKeys_.size(); - result.linearVariables = linearKeys_.size(); + result.nonlinearVariables = theta_.size() - linearValues_.size(); + result.linearVariables = linearValues_.size(); // Set optimization parameters double lambda = parameters_.lambdaInitial; @@ -265,10 +265,10 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() { // Reset the deltas to zeros delta_.setZero(); // Put the linearization points and deltas back for specific variables - if (enforceConsistency_ && (linearKeys_.size() > 0)) { - theta_.update(linearKeys_); - for(const auto key_value: linearKeys_) { - delta_.at(key_value.key) = newDelta.at(key_value.key); + if (enforceConsistency_ && (linearValues_.size() > 0)) { + theta_.update(linearValues_); + for(const auto key: linearValues_.keys()) { + delta_.at(key) = newDelta.at(key); } } // Decrease lambda for next time diff --git a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h index 94cf130cf..79bd22f9d 100644 --- a/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h +++ 
b/gtsam_unstable/nonlinear/BatchFixedLagSmoother.h @@ -136,8 +136,8 @@ protected: /** The current linearization point **/ Values theta_; - /** The set of keys involved in current linear factors. These keys should not be relinearized. **/ - Values linearKeys_; + /** The set of values involved in current linear factors. **/ + Values linearValues_; /** The current ordering */ Ordering ordering_; diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp index 83d0ab719..a31a75920 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchFilter.cpp @@ -139,8 +139,8 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto // Add the new variables to theta theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const auto key_value: newTheta) { - ordering_.push_back(key_value.key); + for (const auto key : newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta delta_.insert(newTheta.zeroVectors()); @@ -221,10 +221,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm if(debug) { PrintNonlinearFactorGraph(smootherSummarization_, "ConcurrentBatchFilter::synchronize ", "Updated Smoother Summarization:", DefaultKeyFormatter); } // Find the set of new separator keys - KeySet newSeparatorKeys; - for(const auto key_value: separatorValues_) { - newSeparatorKeys.insert(key_value.key); - } + const KeySet newSeparatorKeys = separatorValues_.keySet(); if(debug) { PrintKeys(newSeparatorKeys, "ConcurrentBatchFilter::synchronize ", "Current Separator Keys:"); } @@ -236,9 +233,9 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm graph.push_back(smootherShortcut_); Values values; values.insert(smootherSummarizationValues); - for(const auto key_value: separatorValues_) { - if(!values.exists(key_value.key)) { - values.insert(key_value.key, key_value.value); + for(const auto key: newSeparatorKeys) { + if(!values.exists(key)) { + values.insert(key, separatorValues_.at(key)); } } // Calculate the summarized factor on just the new separator keys @@ -471,8 +468,8 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values // Put the linearization points and deltas back for specific variables if(linearValues.size() > 0) { theta.update(linearValues); - for(const auto key_value: linearValues) { - delta.at(key_value.key) = newDelta.at(key_value.key); + for(const auto key: linearValues.keys()) { + delta.at(key) = newDelta.at(key); } } @@ -574,9 +571,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList& keysToMove) { // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove KeySet newSeparatorKeys = removedFactors.keys(); - for(const auto key_value: separatorValues_) { - newSeparatorKeys.insert(key_value.key); - } + newSeparatorKeys.merge(separatorValues_.keySet()); for(Key key: keysToMove) { newSeparatorKeys.erase(key); } diff --git a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp index 75d491bde..94a7d4c22 100644 --- a/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentBatchSmoother.cpp @@ -61,8 +61,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF theta_.insert(newTheta); // Add new variables to the end of the ordering - for(const auto key_value: newTheta) { - 
ordering_.push_back(key_value.key); + for(const auto key: newTheta.keys()) { + ordering_.push_back(key); } // Augment Delta @@ -135,26 +135,30 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa removeFactors(filterSummarizationSlots_); // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta - for(const auto key_value: smootherValues) { - std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); - if(iter_inserted.second) { - // If the insert succeeded - ordering_.push_back(key_value.key); - delta_.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for(const auto key: smootherValues.keys()) { + if(!theta_.exists(key)) { + // If this a new key for theta_, also add to ordering and delta. + const auto& value = smootherValues.at(key); + delta_.insert(key, Vector::Zero(value.dim())); + theta_.insert(key, value); + ordering_.push_back(key); } else { - // If the element already existed in theta_ - iter_inserted.first->value = key_value.value; + // If the key already existed in theta_, just update. + const auto& value = smootherValues.at(key); + theta_.update(key, value); } } - for(const auto key_value: separatorValues) { - std::pair iter_inserted = theta_.tryInsert(key_value.key, key_value.value); - if(iter_inserted.second) { - // If the insert succeeded - ordering_.push_back(key_value.key); - delta_.insert(key_value.key, Vector::Zero(key_value.value.dim())); + for(const auto key: separatorValues.keys()) { + if(!theta_.exists(key)) { + // If this a new key for theta_, also add to ordering and delta. + const auto& value = separatorValues.at(key); + delta_.insert(key, Vector::Zero(value.dim())); + theta_.insert(key, value); + ordering_.push_back(key); } else { - // If the element already existed in theta_ - iter_inserted.first->value = key_value.value; + // If the key already existed in theta_, just update. 
+ const auto& value = separatorValues.at(key); + theta_.update(key, value); } } @@ -322,8 +326,8 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() { // Put the linearization points and deltas back for specific variables if(separatorValues_.size() > 0) { theta_.update(separatorValues_); - for(const auto key_value: separatorValues_) { - delta_.at(key_value.key) = newDelta.at(key_value.key); + for(const auto key: separatorValues_.keys()) { + delta_.at(key) = newDelta.at(key); } } @@ -371,10 +375,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() { } // Get the set of separator keys - gtsam::KeySet separatorKeys; - for(const auto key_value: separatorValues_) { - separatorKeys.insert(key_value.key); - } + const KeySet separatorKeys = separatorValues_.keySet(); // Calculate the marginal factors on the separator smootherSummarization_ = internal::calculateMarginalFactors(graph, theta_, separatorKeys, parameters_.getEliminationFunction()); diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp index b9cf66a97..947c02cc0 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalFilter.cpp @@ -69,14 +69,14 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No int group = 1; // Set all existing variables to Group1 if(isam2_.getLinearizationPoint().size() > 0) { - for(const auto key_value: isam2_.getLinearizationPoint()) { - orderingConstraints->operator[](key_value.key) = group; + for(const auto key: isam2_.getLinearizationPoint().keys()) { + orderingConstraints->operator[](key) = group; } ++group; } // Assign new variables to the root - for(const auto key_value: newTheta) { - orderingConstraints->operator[](key_value.key) = group; + for(const auto key: newTheta.keys()) { + orderingConstraints->operator[](key) = group; } // Set marginalizable variables to Group0 for(Key key: *keysToMove){ @@ -201,8 +201,8 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth // Force iSAM2 not to relinearize anything during this iteration FastList noRelinKeys; - for(const auto key_value: isam2_.getLinearizationPoint()) { - noRelinKeys.push_back(key_value.key); + for(const auto key: isam2_.getLinearizationPoint().keys()) { + noRelinKeys.push_back(key); } // Calculate the summarized factor on just the new separator keys diff --git a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp index 3886d0e42..b82b08048 100644 --- a/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp +++ b/gtsam_unstable/nonlinear/ConcurrentIncrementalSmoother.cpp @@ -66,9 +66,9 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons // Also, mark the separator keys as fixed linearization points FastMap constrainedKeys; FastList noRelinKeys; - for(const auto key_value: separatorValues_) { - constrainedKeys[key_value.key] = 1; - noRelinKeys.push_back(key_value.key); + for (const auto key : separatorValues_.keys()) { + constrainedKeys[key] = 1; + noRelinKeys.push_back(key); } // Use iSAM2 to perform an update @@ -82,14 +82,14 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons Values values(newTheta); // Unfortunately, we must be careful here, as some of the smoother values // and/or separator values may have been added previously - for(const auto key_value: smootherValues_) { - 
if(!isam2_.getLinearizationPoint().exists(key_value.key)) { - values.insert(key_value.key, smootherValues_.at(key_value.key)); + for(const auto key: smootherValues_.keys()) { + if(!isam2_.getLinearizationPoint().exists(key)) { + values.insert(key, smootherValues_.at(key)); } } - for(const auto key_value: separatorValues_) { - if(!isam2_.getLinearizationPoint().exists(key_value.key)) { - values.insert(key_value.key, separatorValues_.at(key_value.key)); + for(const auto key: separatorValues_.keys()) { + if(!isam2_.getLinearizationPoint().exists(key)) { + values.insert(key, separatorValues_.at(key)); } } @@ -245,10 +245,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() { } // Get the set of separator keys - KeySet separatorKeys; - for(const auto key_value: separatorValues_) { - separatorKeys.insert(key_value.key); - } + const KeySet separatorKeys = separatorValues_.keySet(); // Calculate the marginal factors on the separator smootherSummarization_ = internal::calculateMarginalFactors(graph, isam2_.getLinearizationPoint(), separatorKeys, diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp index 61db05167..15038c23f 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchFilter.cpp @@ -64,8 +64,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph, std::set KeysToKeep; - for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph - KeysToKeep.insert(key_value.key); + for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph + KeysToKeep.insert(key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: keysToMarginalize) { KeysToKeep.erase(key); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp index b5fb61428..a2733d509 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentBatchSmoother.cpp @@ -560,8 +560,8 @@ TEST( ConcurrentBatchSmoother, synchronize_3 ) GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues); KeySet eliminateKeys = linearFactors->keys(); - for(const auto key_value: filterSeparatorValues) { - eliminateKeys.erase(key_value.key); + for(const auto key: filterSeparatorValues.keys()) { + eliminateKeys.erase(key); } KeyVector variables(eliminateKeys.begin(), eliminateKeys.end()); GaussianFactorGraph result = *linearFactors->eliminatePartialMultifrontal(variables, EliminateCholesky).second; diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp index 08d71a420..7c6a08278 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalFilter.cpp @@ -80,8 +80,8 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph, std::set KeysToKeep; - for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph - KeysToKeep.insert(key_value.key); + for(const auto key: linPoint.keys()) { // we cycle over all the keys of factorGraph + KeysToKeep.insert(key); } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize for(Key key: 
keysToMarginalize) { KeysToKeep.erase(key); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp index ccb5a104e..0e01112eb 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherDL.cpp @@ -512,8 +512,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const auto key_value: filterSeparatorValues) { - actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); + for(const auto key: filterSeparatorValues.keys()) { + actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); } @@ -580,8 +580,8 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const auto key_value: filterSeparatorValues) { - allkeys.erase(key_value.key); + for(const auto key: filterSeparatorValues.keys()) { + allkeys.erase(key); } KeyVector variables(allkeys.begin(), allkeys.end()); std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky); diff --git a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp index 53c3d1610..a33fcc481 100644 --- a/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp +++ b/gtsam_unstable/nonlinear/tests/testConcurrentIncrementalSmootherGN.cpp @@ -513,8 +513,8 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 ) // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1); Values expectedLinearizationPoint = filterSeparatorValues; Values actualLinearizationPoint; - for(const auto key_value: filterSeparatorValues) { - actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key)); + for(const auto key: filterSeparatorValues.keys()) { + actualLinearizationPoint.insert(key, smoother.getLinearizationPoint().at(key)); } CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6)); } @@ -582,8 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 ) // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate(); KeySet allkeys = LinFactorGraph->keys(); - for(const auto key_value: filterSeparatorValues) - allkeys.erase(key_value.key); + for (const auto key : filterSeparatorValues.keys()) allkeys.erase(key); KeyVector variables(allkeys.begin(), allkeys.end()); std::pair result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);
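
Note for downstream code: with the boost transform iterators deprecated, iteration over a Values goes through keys(), extract<T>(), dims(), or keySet(), as the hunks above show. A minimal sketch of the new idioms, assuming a GTSAM build that includes this patch (the helper function name and output are illustrative only, not part of the library):

    #include <iostream>
    #include <gtsam/geometry/Pose3.h>
    #include <gtsam/nonlinear/Values.h>

    // Illustrative only: summarizes a Values without touching the deprecated iterators.
    void printSummary(const gtsam::Values& values) {
      // keys() replaces `for (const auto key_value : values)` when only keys are needed.
      for (const gtsam::Key key : values.keys())
        std::cout << "key " << key << "\n";

      // extract<T>() replaces the manual dynamic_cast filter over key_value.value.
      for (const auto& key_pose : values.extract<gtsam::Pose3>())
        std::cout << "Pose3 at " << key_pose.first << ":\n" << key_pose.second << "\n";

      // dims() replaces reading key_value.value.dim() inside the loop.
      for (const auto& key_dim : values.dims())
        std::cout << "dim(" << key_dim.first << ") = " << key_dim.second << "\n";
    }

extract<T>() returns a std::map<Key, T> of copies, so it trades one copy per matching value for type-safe iteration without a dynamic_cast at the call site.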