many performance tweaks

release/4.3a0
Frank Dellaert 2019-04-08 16:53:59 -04:00
parent 2cedda703c
commit 9ec714063a
1 changed file with 29 additions and 21 deletions


@@ -430,10 +430,9 @@ Vector JacobianFactor::unweighted_error(const VectorValues& c) const {
 /* ************************************************************************* */
 Vector JacobianFactor::error_vector(const VectorValues& c) const {
-  if (model_)
-    return model_->whiten(unweighted_error(c));
-  else
-    return unweighted_error(c);
+  Vector e = unweighted_error(c);
+  if (model_) model_->whitenInPlace(e);
+  return e;
 }
 /* ************************************************************************* */
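The substance of this change: whiten() returns a freshly allocated vector, while whitenInPlace() scales the buffer the caller already owns, so error_vector now performs one allocation instead of two. A minimal sketch of the pattern, using Eigen and a hypothetical DiagonalModel standing in for GTSAM's noise model (names are illustrative):

#include <Eigen/Dense>
#include <iostream>

using Vector = Eigen::VectorXd;

// Hypothetical stand-in for gtsam::noiseModel with a diagonal sigma.
struct DiagonalModel {
  Vector invSigmas;  // 1/sigma per dimension

  // Allocating version: returns a new whitened vector.
  Vector whiten(const Vector& v) const { return invSigmas.cwiseProduct(v); }

  // In-place version: scales the caller's buffer, no temporary.
  void whitenInPlace(Vector& v) const { v.array() *= invSigmas.array(); }
};

int main() {
  DiagonalModel model{Vector::Constant(3, 2.0)};  // sigma = 0.5 everywhere
  Vector e(3);
  e << 1.0, 2.0, 3.0;

  Vector w = model.whiten(e);  // old style: extra malloc for w
  model.whitenInPlace(e);      // new style: reuses e's storage

  std::cout << (w - e).norm() << "\n";  // 0: identical result
}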
@@ -474,10 +473,10 @@ VectorValues JacobianFactor::hessianDiagonal() const {
     for (size_t k = 0; k < nj; ++k) {
       Vector column_k = Ab_(pos).col(k);
       if (model_)
-        column_k = model_->whiten(column_k);
+        model_->whitenInPlace(column_k);
       dj(k) = dot(column_k, column_k);
     }
-    d.insert(j, dj);
+    d.emplace(j, dj);
   }
   return d;
 }
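For reference, this loop forms the k-th diagonal entry of A'Σ⁻¹A as the squared norm of the whitened k-th column; the switch to whitenInPlace saves one temporary per column, and d.emplace(j, dj) builds the map entry without an intermediate pair. A standalone sketch of the column loop (Eigen only; the diagonal whitening vector invSigmas is an assumption, since model_ need not be diagonal):

#include <Eigen/Dense>
#include <iostream>

using Matrix = Eigen::MatrixXd;
using Vector = Eigen::VectorXd;

// Diagonal of A' * W' * W * A, column by column, with W = diag(invSigmas).
Vector hessianDiagonal(const Matrix& A, const Vector& invSigmas) {
  Vector d(A.cols());
  for (int k = 0; k < A.cols(); ++k) {
    Vector column_k = A.col(k);
    column_k.array() *= invSigmas.array();  // whiten in place, no temporary
    d(k) = column_k.squaredNorm();          // dot(column_k, column_k)
  }
  return d;
}

int main() {
  Matrix A(2, 2);
  A << 1, 2,
       3, 4;
  std::cout << hessianDiagonal(A, Vector::Ones(2)).transpose() << "\n";  // 10 20
}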
@@ -496,7 +495,7 @@ map<Key, Matrix> JacobianFactor::hessianBlockDiagonal() const {
     Matrix Aj = Ab_(pos);
     if (model_)
       Aj = model_->Whiten(Aj);
-    blocks.insert(make_pair(j, Aj.transpose() * Aj));
+    blocks.emplace(j, Aj.transpose() * Aj);
   }
   return blocks;
 }
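Both map updates (d.emplace above and blocks.emplace here) replace insert(make_pair(...)), which builds a temporary pair and then copies it into the container, with emplace, which forwards its arguments and constructs the mapped value directly in the node. A small illustration with std::map, assuming GTSAM's containers expose the standard emplace signature:

#include <Eigen/Dense>
#include <iostream>
#include <map>

using Key = size_t;
using Matrix = Eigen::MatrixXd;

int main() {
  Matrix Aj(2, 2);
  Aj << 1, 2,
        3, 4;

  std::map<Key, Matrix> blocks;

  // Old style: a pair<Key, Matrix> temporary is built, then copied in.
  blocks.insert(std::make_pair(Key(1), Matrix(Aj.transpose() * Aj)));

  // New style: the Gram block is evaluated straight into the map node.
  blocks.emplace(Key(2), Aj.transpose() * Aj);

  std::cout << blocks.at(2) << "\n";  // same block, one less Matrix copy
}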
@@ -541,29 +540,38 @@ void JacobianFactor::updateHessian(const KeyVector& infoKeys,
 /* ************************************************************************* */
 Vector JacobianFactor::operator*(const VectorValues& x) const {
-  Vector Ax = Vector::Zero(Ab_.rows());
+  Vector Ax(Ab_.rows());
+  Ax.setZero();
   if (empty())
     return Ax;
   // Just iterate over all A matrices and multiply in correct config part
-  for (size_t pos = 0; pos < size(); ++pos)
-    Ax += Ab_(pos) * x[keys_[pos]];
-  return model_ ? model_->whiten(Ax) : Ax;
+  for (size_t pos = 0; pos < size(); ++pos) {
+    // http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
+    Ax.noalias() += Ab_(pos) * x[keys_[pos]];
+  }
+  if (model_) model_->whitenInPlace(Ax);
+  return Ax;
 }
 /* ************************************************************************* */
 void JacobianFactor::transposeMultiplyAdd(double alpha, const Vector& e,
     VectorValues& x) const {
-  Vector E = alpha * (model_ ? model_->whiten(e) : e);
+  Vector E(e.size());
+  E.noalias() = alpha * e;
+  if (model_) model_->whitenInPlace(E);
   // Just iterate over all A matrices and insert Ai^e into VectorValues
   for (size_t pos = 0; pos < size(); ++pos) {
-    Key j = keys_[pos];
-    // Create the value as a zero vector if it does not exist.
-    pair<VectorValues::iterator, bool> xi = x.tryInsert(j, Vector());
-    if (xi.second)
-      xi.first->second = Vector::Zero(getDim(begin() + pos));
-    xi.first->second += Ab_(pos).transpose()*E;
+    const Key j = keys_[pos];
+    // To avoid another malloc if key exists, we explicitly check
+    auto it = x.find(j);
+    if (it != x.end()) {
+      // http://eigen.tuxfamily.org/dox/TopicWritingEfficientProductExpression.html
+      it->second.noalias() += Ab_(pos).transpose() * E;
+    } else {
+      x.emplace(j, Ab_(pos).transpose() * E);
+    }
   }
 }
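Two separate optimizations land in this hunk. In operator*, Ax.noalias() += ... promises Eigen that the destination does not alias the operands, so the product is accumulated directly instead of being evaluated into a safety temporary first (the linked Eigen page explains the rule). In transposeMultiplyAdd, the old tryInsert always constructed a Vector and zero-filled it on a miss; the new code does a single find, so the common hit path allocates nothing, and the miss path emplaces the product straight into the container. A combined sketch of both patterns, with std::map standing in for VectorValues (an assumption):

#include <Eigen/Dense>
#include <iostream>
#include <map>

using Key = size_t;
using Matrix = Eigen::MatrixXd;
using Vector = Eigen::VectorXd;

// x[j] += A_j' * E for one block, mirroring the hunk above.
void transposeMultiplyAdd(const Matrix& Aj, Key j, const Vector& E,
                          std::map<Key, Vector>& x) {
  auto it = x.find(j);  // one lookup, no speculative insert
  if (it != x.end()) {
    // noalias(): accumulate the product in place; safe because
    // it->second appears nowhere in the right-hand side.
    it->second.noalias() += Aj.transpose() * E;
  } else {
    // Miss: evaluate the product directly into the new map node,
    // skipping the zero-fill-then-add of the tryInsert version.
    x.emplace(j, Aj.transpose() * E);
  }
}

int main() {
  Matrix Aj = Matrix::Identity(2, 2);
  Vector E(2);
  E << 1, 2;

  std::map<Key, Vector> x;
  transposeMultiplyAdd(Aj, 7, E, x);  // miss: emplaces A'*E
  transposeMultiplyAdd(Aj, 7, E, x);  // hit: accumulates in place
  std::cout << x.at(7).transpose() << "\n";  // 2 4
}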
@@ -625,8 +633,8 @@ VectorValues JacobianFactor::gradientAtZero() const {
   Vector b = getb();
   // Gradient is really -A'*b / sigma^2
   // transposeMultiplyAdd will divide by sigma once, so we need one more
-  Vector b_sigma = model_ ? model_->whiten(b) : b;
-  this->transposeMultiplyAdd(-1.0, b_sigma, g); // g -= A'*b/sigma^2
+  if (model_) model_->whitenInPlace(b);
+  this->transposeMultiplyAdd(-1.0, b, g); // g -= A'*b/sigma^2
   return g;
 }