From 1affae697c574e778388ffb65a1a3c1f2e43d36e Mon Sep 17 00:00:00 2001
From: Sungtae An
Date: Mon, 22 Dec 2014 14:50:26 -0500
Subject: [PATCH] Correct gradientAtZero: remove zero initialization

---
 gtsam/slam/RegularJacobianFactor.h | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/gtsam/slam/RegularJacobianFactor.h b/gtsam/slam/RegularJacobianFactor.h
index e6e240389..bb275ef3f 100644
--- a/gtsam/slam/RegularJacobianFactor.h
+++ b/gtsam/slam/RegularJacobianFactor.h
@@ -141,14 +141,11 @@ public:
     }
   }
 
-  /** Raw memory access version of gradientAtZero */
+  /** Raw memory access version of gradientAtZero
+   * TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+   */
   virtual void gradientAtZero(double* d) const {
 
-    // Initialize d as a zero vector
-    for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-      DMap(d + D * j) = DVector::Zero();
-    }
-
     // Get vector b not weighted
     Vector b = getb();
 
@@ -159,13 +156,12 @@ public:
     }
 
     // Just iterate over all A matrices
-    for (size_t pos = 0; pos < size(); ++pos) {
-      Key j = keys_[pos];
+    for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
       DVector dj;
       // gradient -= A'*b/sigma^2
       // Computing with each column
       for (size_t k = 0; k < D; ++k){
-        Vector column_k = Ab_(pos).col(k);
+        Vector column_k = Ab_(j).col(k);
         dj(k) = -1.0*dot(b, column_k);
       }
       DMap(d + D * j) += dj;