Removed references when iterating over Values; also used auto where possible in the loops I changed.
parent 5df235d714
commit 4d100461d4
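Every hunk below applies the same mechanical change: range-for loops over gtsam::Values (and its filtered views) stop binding a reference and take the element by value, since dereferencing the Values iterator yields a short-lived key/value pair object. A minimal sketch of the resulting loop shape, assuming a GTSAM 4-era Values API (the key 0 and Pose3 value are arbitrary examples):

#include <gtsam/geometry/Pose3.h>
#include <gtsam/nonlinear/Values.h>

int main() {
  gtsam::Values values;
  values.insert(0, gtsam::Pose3());  // arbitrary example key and value

  // Iterating a const Values yields Values::ConstKeyValuePair.
  const gtsam::Values& constValues = values;

  // Before: for (const gtsam::Values::ConstKeyValuePair& key_value : constValues)
  // After:  take the pair by value; copying is cheap because the pair holds a
  // Key plus a reference to the stored Value.
  for (const auto key_value : constValues) {
    const gtsam::Key key = key_value.key;
    const gtsam::Value& value = key_value.value;  // still refers to the stored Value
    (void)key;
    (void)value;
  }
  return 0;
}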
@@ -43,7 +43,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
@@ -74,7 +74,7 @@ int main(const int argc, const char* argv[]) {
 
   // Calculate and print marginal covariances for all variables
   Marginals marginals(*graph, result);
-  for (const auto& key_value : result) {
+  for (const auto key_value : result) {
     auto p = dynamic_cast<const GenericValue<Pose3>*>(&key_value.value);
     if (!p) continue;
     std::cout << marginals.marginalCovariance(key_value.key) << endl;
@@ -55,7 +55,7 @@ int main(const int argc, const char *argv[]) {
   std::cout << "Rewriting input to file: " << inputFileRewritten << std::endl;
   // Additional: rewrite input with simplified keys 0,1,...
   Values simpleInitial;
-  for(const Values::ConstKeyValuePair& key_value: *initial) {
+  for(const auto key_value: *initial) {
     Key key;
     if(add)
       key = key_value.key + firstKey;
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
@@ -42,7 +42,7 @@ int main(const int argc, const char* argv[]) {
   auto priorModel = noiseModel::Diagonal::Variances(
       (Vector(6) << 1e-6, 1e-6, 1e-6, 1e-4, 1e-4, 1e-4).finished());
   Key firstKey = 0;
-  for (const Values::ConstKeyValuePair& key_value : *initial) {
+  for (const auto key_value : *initial) {
     std::cout << "Adding prior to g2o file " << std::endl;
     firstKey = key_value.key;
     graph->addPrior(firstKey, Pose3(), priorModel);
@@ -559,7 +559,7 @@ void runPerturb()
 
   // Perturb values
   VectorValues noise;
-  for(const Values::KeyValuePair& key_val: initial)
+  for(const Values::KeyValuePair key_val: initial)
   {
     Vector noisev(key_val.value.dim());
     for(Vector::Index i = 0; i < noisev.size(); ++i)
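The runPerturb hunk is the one place where the explicit Values::KeyValuePair type is kept and only the reference is dropped. Copying the pair is still cheap, because it holds a Key plus a non-owning reference to the stored value; a short sketch under that assumption:

#include <gtsam/nonlinear/Values.h>

void perturbSketch(gtsam::Values& initial) {
  // Copying the pair copies the Key and the reference binding; the stored
  // Value object itself is not copied.
  for (const gtsam::Values::KeyValuePair key_val : initial) {
    gtsam::Vector noisev = gtsam::Vector::Zero(key_val.value.dim());
    (void)noisev;  // a real perturbation would fill this and retract with it
  }
}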
@@ -825,7 +825,7 @@ TEST(Rot3, RQ_derivative) {
     const auto R = Rot3::RzRyRx(xyz).matrix();
     const auto num = numericalDerivative11(RQ_proxy, R);
     Matrix39 calc;
-    RQ(R, calc).second;
+    auto dummy = RQ(R, calc).second;
 
     const auto err = vec_err.second;
     CHECK(assert_equal(num, calc, err));
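The Rot3 hunk is a different fix: RQ(R, calc) is called for its side effect of filling the Jacobian calc, and naming the otherwise-discarded return value presumably silences a compiler warning about an unused result. A generic illustration of the idiom, with hypothetical names:

#include <utility>

// Returns a result and also writes a derivative through an output parameter.
std::pair<int, double> computeWithJacobian(double& jacobian) {
  jacobian = 2.0;    // the side effect the caller actually wants
  return {42, 3.14}; // the return value the caller ignores here
}

void caller() {
  double J = 0.0;
  // Naming the result keeps the side effect on J without a discarded-value
  // warning; [[maybe_unused]] is the C++17 way to mark it explicitly.
  [[maybe_unused]] auto dummy = computeWithJacobian(J).second;
}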
@@ -161,7 +161,7 @@ namespace gtsam {
   bool VectorValues::equals(const VectorValues& x, double tol) const {
     if(this->size() != x.size())
       return false;
-    for(const auto& values: boost::combine(*this, x)) {
+    for(const auto values: boost::combine(*this, x)) {
       if(values.get<0>().first != values.get<1>().first ||
          !equal_with_abs_tol(values.get<0>().second, values.get<1>().second, tol))
         return false;
@@ -233,7 +233,7 @@ namespace gtsam {
     double result = 0.0;
     typedef boost::tuple<value_type, value_type> ValuePair;
     using boost::adaptors::map_values;
-    for(const ValuePair& values: boost::combine(*this, v)) {
+    for(const ValuePair values: boost::combine(*this, v)) {
       assert_throw(values.get<0>().first == values.get<1>().first,
         invalid_argument("VectorValues::dot called with a VectorValues of different structure"));
       assert_throw(values.get<0>().second.size() == values.get<1>().second.size(),
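The VectorValues hunks apply the same rule to boost::combine: its zip iterator dereferences to a temporary tuple of references, so the loops take the element by value while the get<N>() accessors still refer to the original elements. A small standalone sketch under that assumption:

#include <boost/range/combine.hpp>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> a{1, 2, 3};
  std::vector<int> b{4, 5, 6};
  // The zipped element is a temporary boost::tuple of references, so it is
  // taken by value rather than bound to a reference.
  for (const auto t : boost::combine(a, b)) {
    std::cout << t.get<0>() + t.get<1>() << "\n";
  }
  return 0;
}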
@@ -376,7 +376,7 @@ static Scatter scatterFromValues(const Values& values) {
   scatter.reserve(values.size());
 
   // use "natural" ordering with keys taken from the initial values
-  for (const auto& key_value : values) {
+  for (const auto key_value : values) {
     scatter.add(key_value.key, key_value.value.dim());
   }
 
@@ -217,7 +217,7 @@ namespace gtsam {
   /** Constructor from a Filtered view copies out all values */
   template<class ValueType>
   Values::Values(const Values::Filtered<ValueType>& view) {
-    for(const typename Filtered<ValueType>::KeyValuePair& key_value: view) {
+    for(const auto key_value: view) {
       Key key = key_value.key;
       insert(key, static_cast<const ValueType&>(key_value.value));
     }
@@ -226,7 +226,7 @@ namespace gtsam {
   /* ************************************************************************* */
   template<class ValueType>
   Values::Values(const Values::ConstFiltered<ValueType>& view) {
-    for(const typename ConstFiltered<ValueType>::KeyValuePair& key_value: view) {
+    for(const auto key_value: view) {
       Key key = key_value.key;
       insert(key, static_cast<const ValueType&>(key_value.value));
     }
@@ -206,7 +206,7 @@ namespace gtsam {
   /* ************************************************************************* */
   size_t Values::dim() const {
     size_t result = 0;
-    for(const ConstKeyValuePair& key_value: *this) {
+    for(const auto key_value: *this) {
       result += key_value.value.dim();
     }
     return result;
@@ -215,7 +215,7 @@ namespace gtsam {
   /* ************************************************************************* */
   VectorValues Values::zeroVectors() const {
     VectorValues result;
-    for(const ConstKeyValuePair& key_value: *this)
+    for(const auto key_value: *this)
       result.insert(key_value.key, Vector::Zero(key_value.value.dim()));
     return result;
   }
@@ -126,7 +126,7 @@ struct LevenbergMarquardtState : public NonlinearOptimizerState {
     noiseModelCache.resize(0);
     // for each of the variables, add a prior
     damped.reserve(damped.size() + values.size());
-    for (const auto& key_value : values) {
+    for (const auto key_value : values) {
      const Key key = key_value.key;
      const size_t dim = key_value.value.dim();
      const CachedModel* item = getCachedModel(dim);
@@ -335,7 +335,7 @@ TEST(Values, filter) {
   int i = 0;
   Values::Filtered<Value> filtered = values.filter(boost::bind(std::greater_equal<Key>(), _1, 2));
   EXPECT_LONGS_EQUAL(2, (long)filtered.size());
-  for(const Values::Filtered<>::KeyValuePair& key_value: filtered) {
+  for(const auto key_value: filtered) {
     if(i == 0) {
       LONGS_EQUAL(2, (long)key_value.key);
       try {key_value.value.cast<Pose2>();} catch (const std::bad_cast& e) { FAIL("can't cast Value to Pose2");}
@@ -370,7 +370,7 @@ TEST(Values, filter) {
   i = 0;
   Values::ConstFiltered<Pose3> pose_filtered = values.filter<Pose3>();
   EXPECT_LONGS_EQUAL(2, (long)pose_filtered.size());
-  for(const Values::ConstFiltered<Pose3>::KeyValuePair& key_value: pose_filtered) {
+  for(const auto key_value: pose_filtered) {
     if(i == 0) {
       EXPECT_LONGS_EQUAL(1, (long)key_value.key);
       EXPECT(assert_equal(pose1, key_value.value));
@@ -408,7 +408,7 @@ TEST(Values, Symbol_filter) {
   values.insert(Symbol('y', 3), pose3);
 
   int i = 0;
-  for(const Values::Filtered<Value>::KeyValuePair& key_value: values.filter(Symbol::ChrTest('y'))) {
+  for(const auto key_value: values.filter(Symbol::ChrTest('y'))) {
     if(i == 0) {
       LONGS_EQUAL(Symbol('y', 1), (long)key_value.key);
       EXPECT(assert_equal(pose1, key_value.value.cast<Pose3>()));
@@ -62,7 +62,7 @@ static Values computePoses(const Values& initialRot,
 
   // Upgrade rotations to full poses
   Values initialPose;
-  for (const auto& key_value : initialRot) {
+  for (const auto key_value : initialRot) {
     Key key = key_value.key;
     const auto& rot = initialRot.at<typename Pose::Rotation>(key);
     Pose initializedPose = Pose(rot, origin);
@@ -86,7 +86,7 @@ static Values computePoses(const Values& initialRot,
 
   // put into Values structure
   Values estimate;
-  for (const auto& key_value : GNresult) {
+  for (const auto key_value : GNresult) {
     Key key = key_value.key;
     if (key != kAnchorKey) {
       const Pose& pose = GNresult.at<Pose>(key);
@@ -124,7 +124,7 @@ Values InitializePose3::computeOrientationsGradient(
   // this works on the inverse rotations, according to Tron&Vidal,2011
   Values inverseRot;
   inverseRot.insert(initialize::kAnchorKey, Rot3());
-  for(const auto& key_value: givenGuess) {
+  for(const auto key_value: givenGuess) {
     Key key = key_value.key;
     const Pose3& pose = givenGuess.at<Pose3>(key);
     inverseRot.insert(key, pose.rotation().inverse());
@@ -139,7 +139,7 @@ Values InitializePose3::computeOrientationsGradient(
   // calculate max node degree & allocate gradient
   size_t maxNodeDeg = 0;
   VectorValues grad;
-  for(const auto& key_value: inverseRot) {
+  for(const auto key_value: inverseRot) {
     Key key = key_value.key;
     grad.insert(key,Z_3x1);
     size_t currNodeDeg = (adjEdgesMap.at(key)).size();
@@ -162,7 +162,7 @@ Values InitializePose3::computeOrientationsGradient(
   //////////////////////////////////////////////////////////////////////////
   // compute the gradient at each node
   maxGrad = 0;
-  for (const auto& key_value : inverseRot) {
+  for (const auto key_value : inverseRot) {
     Key key = key_value.key;
     Vector gradKey = Z_3x1;
     // collect the gradient for each edge incident on key
@@ -203,7 +203,7 @@ Values InitializePose3::computeOrientationsGradient(
   // Return correct rotations
   const Rot3& Rref = inverseRot.at<Rot3>(initialize::kAnchorKey); // This will be set to the identity as so far we included no prior
   Values estimateRot;
-  for(const auto& key_value: inverseRot) {
+  for(const auto key_value: inverseRot) {
     Key key = key_value.key;
     if (key != initialize::kAnchorKey) {
       const Rot3& R = inverseRot.at<Rot3>(key);
@@ -586,7 +586,7 @@ void save2D(const NonlinearFactorGraph &graph, const Values &config,
   fstream stream(filename.c_str(), fstream::out);
 
   // save poses
-  for (const Values::ConstKeyValuePair key_value : config) {
+  for (const auto key_value : config) {
     const Pose2 &pose = key_value.value.cast<Pose2>();
     stream << "VERTEX2 " << key_value.key << " " << pose.x() << " " << pose.y()
            << " " << pose.theta() << endl;
@@ -284,7 +284,7 @@ TEST( Lago, largeGraphNoisy_orientations ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);
 
-  for(const Values::KeyValuePair& key_val: *expected){
+  for(const auto key_val: *expected){
     Key k = key_val.key;
     EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-5));
   }
@@ -310,7 +310,7 @@ TEST( Lago, largeGraphNoisy ) {
   Values::shared_ptr expected;
   boost::tie(gmatlab, expected) = readG2o(matlabFile);
 
-  for(const Values::KeyValuePair& key_val: *expected){
+  for(const auto key_val: *expected){
     Key k = key_val.key;
     EXPECT(assert_equal(expected->at<Pose2>(k), actual.at<Pose2>(k), 1e-2));
   }
@@ -308,11 +308,11 @@ int main(int argc, char** argv) {
   // And to demonstrate the fixed-lag aspect, print the keys contained in each smoother after 3.0 seconds
   cout << "After 15.0 seconds, each version contains to the following keys:" << endl;
   cout << " Concurrent Filter Keys: " << endl;
-  for(const auto& key_value: concurrentFilter.getLinearizationPoint()) {
+  for(const auto key_value: concurrentFilter.getLinearizationPoint()) {
     cout << setprecision(5) << " Key: " << key_value.key << endl;
   }
   cout << " Concurrent Smoother Keys: " << endl;
-  for(const auto& key_value: concurrentSmoother.getLinearizationPoint()) {
+  for(const auto key_value: concurrentSmoother.getLinearizationPoint()) {
     cout << setprecision(5) << " Key: " << key_value.key << endl;
   }
   cout << " Fixed-Lag Smoother Keys: " << endl;
@@ -233,7 +233,7 @@ int main(int argc, char** argv) {
     }
   }
   countK = 0;
-  for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
+  for(const auto it: result.filter<Point2>())
     os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1"
         << endl;
   if (smart) {
@@ -256,7 +256,7 @@ int main(int argc, char** argv) {
   // Write result to file
   Values result = isam.calculateEstimate();
   ofstream os("rangeResult.txt");
-  for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
+  for(const auto it: result.filter<Pose2>())
     os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
        << it.value.theta() << endl;
   exit(0);
@@ -202,11 +202,11 @@ int main(int argc, char** argv) {
   // Write result to file
   Values result = isam.calculateEstimate();
   ofstream os2("rangeResultLM.txt");
-  for(const Values::ConstFiltered<Point2>::KeyValuePair& it: result.filter<Point2>())
+  for(const auto it: result.filter<Point2>())
     os2 << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t1"
         << endl;
   ofstream os("rangeResult.txt");
-  for(const Values::ConstFiltered<Pose2>::KeyValuePair& it: result.filter<Pose2>())
+  for(const auto it: result.filter<Pose2>())
     os << it.key << "\t" << it.value.x() << "\t" << it.value.y() << "\t"
        << it.value.theta() << endl;
   exit(0);
@@ -59,7 +59,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::update(
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for (const auto& key_value : newTheta) {
+  for (const auto key_value : newTheta) {
     ordering_.push_back(key_value.key);
   }
   // Augment Delta
@@ -267,7 +267,7 @@ FixedLagSmoother::Result BatchFixedLagSmoother::optimize() {
   // Put the linearization points and deltas back for specific variables
   if (enforceConsistency_ && (linearKeys_.size() > 0)) {
     theta_.update(linearKeys_);
-    for(const auto& key_value: linearKeys_) {
+    for(const auto key_value: linearKeys_) {
       delta_.at(key_value.key) = newDelta.at(key_value.key);
     }
   }
@@ -139,7 +139,7 @@ ConcurrentBatchFilter::Result ConcurrentBatchFilter::update(const NonlinearFacto
   // Add the new variables to theta
   theta_.insert(newTheta);
   // Add new variables to the end of the ordering
-  for(const Values::ConstKeyValuePair& key_value: newTheta) {
+  for(const auto key_value: newTheta) {
     ordering_.push_back(key_value.key);
   }
   // Augment Delta
@@ -222,7 +222,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
 
   // Find the set of new separator keys
   KeySet newSeparatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     newSeparatorKeys.insert(key_value.key);
   }
 
@@ -236,7 +236,7 @@ void ConcurrentBatchFilter::synchronize(const NonlinearFactorGraph& smootherSumm
   graph.push_back(smootherShortcut_);
   Values values;
   values.insert(smootherSummarizationValues);
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     if(!values.exists(key_value.key)) {
       values.insert(key_value.key, key_value.value);
     }
@@ -471,7 +471,7 @@ void ConcurrentBatchFilter::optimize(const NonlinearFactorGraph& factors, Values
   // Put the linearization points and deltas back for specific variables
   if(linearValues.size() > 0) {
     theta.update(linearValues);
-    for(const Values::ConstKeyValuePair& key_value: linearValues) {
+    for(const auto key_value: linearValues) {
      delta.at(key_value.key) = newDelta.at(key_value.key);
    }
  }
@@ -574,7 +574,7 @@ void ConcurrentBatchFilter::moveSeparator(const FastList<Key>& keysToMove) {
 
   // Calculate the set of new separator keys: AffectedKeys + PreviousSeparatorKeys - KeysToMove
   KeySet newSeparatorKeys = removedFactors.keys();
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     newSeparatorKeys.insert(key_value.key);
   }
   for(Key key: keysToMove) {
@@ -61,7 +61,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::update(const NonlinearF
   theta_.insert(newTheta);
 
   // Add new variables to the end of the ordering
-  for(const Values::ConstKeyValuePair& key_value: newTheta) {
+  for(const auto key_value: newTheta) {
     ordering_.push_back(key_value.key);
   }
 
@@ -135,7 +135,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
   removeFactors(filterSummarizationSlots_);
 
   // Insert new linpoints into the values, augment the ordering, and store new dims to augment delta
-  for(const Values::ConstKeyValuePair& key_value: smootherValues) {
+  for(const auto key_value: smootherValues) {
     std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
     if(iter_inserted.second) {
       // If the insert succeeded
@@ -146,7 +146,7 @@ void ConcurrentBatchSmoother::synchronize(const NonlinearFactorGraph& smootherFa
       iter_inserted.first->value = key_value.value;
     }
   }
-  for(const Values::ConstKeyValuePair& key_value: separatorValues) {
+  for(const auto key_value: separatorValues) {
     std::pair<Values::iterator, bool> iter_inserted = theta_.tryInsert(key_value.key, key_value.value);
     if(iter_inserted.second) {
       // If the insert succeeded
@@ -322,7 +322,7 @@ ConcurrentBatchSmoother::Result ConcurrentBatchSmoother::optimize() {
   // Put the linearization points and deltas back for specific variables
   if(separatorValues_.size() > 0) {
     theta_.update(separatorValues_);
-    for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+    for(const auto key_value: separatorValues_) {
      delta_.at(key_value.key) = newDelta.at(key_value.key);
    }
  }
@@ -372,7 +372,7 @@ void ConcurrentBatchSmoother::updateSmootherSummarization() {
 
   // Get the set of separator keys
   gtsam::KeySet separatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     separatorKeys.insert(key_value.key);
   }
 
@@ -69,13 +69,13 @@ ConcurrentIncrementalFilter::Result ConcurrentIncrementalFilter::update(const No
   int group = 1;
   // Set all existing variables to Group1
   if(isam2_.getLinearizationPoint().size() > 0) {
-    for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
+    for(const auto key_value: isam2_.getLinearizationPoint()) {
      orderingConstraints->operator[](key_value.key) = group;
    }
    ++group;
   }
   // Assign new variables to the root
-  for(const Values::ConstKeyValuePair& key_value: newTheta) {
+  for(const auto key_value: newTheta) {
     orderingConstraints->operator[](key_value.key) = group;
   }
   // Set marginalizable variables to Group0
@@ -201,7 +201,7 @@ void ConcurrentIncrementalFilter::synchronize(const NonlinearFactorGraph& smooth
 
   // Force iSAM2 not to relinearize anything during this iteration
   FastList<Key> noRelinKeys;
-  for(const Values::ConstKeyValuePair& key_value: isam2_.getLinearizationPoint()) {
+  for(const auto key_value: isam2_.getLinearizationPoint()) {
     noRelinKeys.push_back(key_value.key);
   }
 
@@ -66,7 +66,7 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   // Also, mark the separator keys as fixed linearization points
   FastMap<Key,int> constrainedKeys;
   FastList<Key> noRelinKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     constrainedKeys[key_value.key] = 1;
     noRelinKeys.push_back(key_value.key);
   }
@@ -82,12 +82,12 @@ ConcurrentIncrementalSmoother::Result ConcurrentIncrementalSmoother::update(cons
   Values values(newTheta);
   // Unfortunately, we must be careful here, as some of the smoother values
   // and/or separator values may have been added previously
-  for(const Values::ConstKeyValuePair& key_value: smootherValues_) {
+  for(const auto key_value: smootherValues_) {
     if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
       values.insert(key_value.key, smootherValues_.at(key_value.key));
     }
   }
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     if(!isam2_.getLinearizationPoint().exists(key_value.key)) {
       values.insert(key_value.key, separatorValues_.at(key_value.key));
     }
@@ -246,7 +246,7 @@ void ConcurrentIncrementalSmoother::updateSmootherSummarization() {
 
   // Get the set of separator keys
   KeySet separatorKeys;
-  for(const Values::ConstKeyValuePair& key_value: separatorValues_) {
+  for(const auto key_value: separatorValues_) {
     separatorKeys.insert(key_value.key);
   }
 
@@ -64,7 +64,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
 
 
   std::set<Key> KeysToKeep;
-  for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
+  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
     KeysToKeep.insert(key_value.key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
@@ -560,7 +560,7 @@ TEST( ConcurrentBatchSmoother, synchronize_3 )
   GaussianFactorGraph::shared_ptr linearFactors = allFactors.linearize(allValues);
 
   KeySet eliminateKeys = linearFactors->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     eliminateKeys.erase(key_value.key);
   }
   KeyVector variables(eliminateKeys.begin(), eliminateKeys.end());
@@ -80,7 +80,7 @@ NonlinearFactorGraph CalculateMarginals(const NonlinearFactorGraph& factorGraph,
 
 
   std::set<Key> KeysToKeep;
-  for(const Values::ConstKeyValuePair& key_value: linPoint) { // we cycle over all the keys of factorGraph
+  for(const auto key_value: linPoint) { // we cycle over all the keys of factorGraph
     KeysToKeep.insert(key_value.key);
   } // so far we are keeping all keys, but we want to delete the ones that we are going to marginalize
   for(Key key: keysToMarginalize) {
@@ -512,7 +512,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
@@ -580,7 +580,7 @@ TEST( ConcurrentIncrementalSmootherDL, synchronize_3 )
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
 
   KeySet allkeys = LinFactorGraph->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     allkeys.erase(key_value.key);
   }
   KeyVector variables(allkeys.begin(), allkeys.end());
@@ -513,7 +513,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_2 )
   // Values expectedLinearizationPoint = BatchOptimize(allFactors, allValues, 1);
   Values expectedLinearizationPoint = filterSeparatorValues;
   Values actualLinearizationPoint;
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues) {
+  for(const auto key_value: filterSeparatorValues) {
     actualLinearizationPoint.insert(key_value.key, smoother.getLinearizationPoint().at(key_value.key));
   }
   CHECK(assert_equal(expectedLinearizationPoint, actualLinearizationPoint, 1e-6));
@@ -582,7 +582,7 @@ TEST( ConcurrentIncrementalSmootherGN, synchronize_3 )
   // GaussianBayesNet::shared_ptr GBNsptr = GSS.eliminate();
 
   KeySet allkeys = LinFactorGraph->keys();
-  for(const Values::ConstKeyValuePair& key_value: filterSeparatorValues)
+  for(const auto key_value: filterSeparatorValues)
     allkeys.erase(key_value.key);
   KeyVector variables(allkeys.begin(), allkeys.end());
   std::pair<GaussianBayesNet::shared_ptr, GaussianFactorGraph::shared_ptr> result = LinFactorGraph->eliminatePartialSequential(variables, EliminateCholesky);