diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp
index a63bbf473..723d84d57 100644
--- a/gtsam/linear/JacobianFactor.cpp
+++ b/gtsam/linear/JacobianFactor.cpp
@@ -460,25 +460,6 @@ VectorValues JacobianFactor::hessianDiagonal() const {
   return d;
 }
 
-/* ************************************************************************* */
-// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
-void JacobianFactor::hessianDiagonal(double* d) const {
-
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, 9, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-
-  // Loop over all variables in the factor
-  for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
-    // Get the diagonal block, and insert its diagonal
-    DVector dj;
-    for (size_t k = 0; k < 9; ++k)
-      dj(k) = Ab_(j).col(k).squaredNorm();
-
-    DMap(d + 9 * j) += dj;
-  }
-}
-
 /* ************************************************************************* */
 map<Key,Matrix> JacobianFactor::hessianBlockDiagonal() const {
   map<Key,Matrix> blocks;
@@ -528,40 +509,6 @@ void JacobianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   transposeMultiplyAdd(alpha, Ax, y);
 }
 
-void JacobianFactor::multiplyHessianAdd(double alpha, const double* x,
-    double* y, std::vector<size_t> keys) const {
-
-  // Use eigen magic to access raw memory
-  typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
-  typedef Eigen::Map<DVector> DMap;
-  typedef Eigen::Map<const DVector> ConstDMap;
-
-  if (empty())
-    return;
-  Vector Ax = zero(Ab_.rows());
-
-  // Just iterate over all A matrices and multiply in correct config part
-  for (size_t pos = 0; pos < size(); ++pos)
-    Ax += Ab_(pos)
-        * ConstDMap(x + keys[keys_[pos]],
-            keys[keys_[pos] + 1] - keys[keys_[pos]]);
-
-  // Deal with noise properly, need to Double* whiten as we are dividing by variance
-  if (model_) {
-    model_->whitenInPlace(Ax);
-    model_->whitenInPlace(Ax);
-  }
-
-  // multiply with alpha
-  Ax *= alpha;
-
-  // Again iterate over all A matrices and insert Ai^e into y
-  for (size_t pos = 0; pos < size(); ++pos)
-    DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
-        pos).transpose() * Ax;
-
-}
-
 /* ************************************************************************* */
 VectorValues JacobianFactor::gradientAtZero() const {
   VectorValues g;
@@ -573,11 +520,6 @@ VectorValues JacobianFactor::gradientAtZero() const {
   return g;
 }
 
-/* ************************************************************************* */
-void JacobianFactor::gradientAtZero(double* d) const {
-  //throw std::runtime_error("gradientAtZero not implemented for Jacobian factor");
-}
-
 /* ************************************************************************* */
 pair<Matrix, Vector> JacobianFactor::jacobian() const {
   pair<Matrix, Vector> result = jacobianUnweighted();
diff --git a/gtsam/linear/JacobianFactor.h b/gtsam/linear/JacobianFactor.h
index 99ea91cd9..6057b7819 100644
--- a/gtsam/linear/JacobianFactor.h
+++ b/gtsam/linear/JacobianFactor.h
@@ -185,9 +185,6 @@ namespace gtsam {
     /// Return the diagonal of the Hessian for this factor
     virtual VectorValues hessianDiagonal() const;
 
-    /// Raw memory access version of hessianDiagonal
-    virtual void hessianDiagonal(double* d) const;
-
     /// Return the block diagonal of the Hessian for this factor
     virtual std::map<Key,Matrix> hessianBlockDiagonal() const;
 
@@ -279,16 +276,9 @@ namespace gtsam {
     /** y += alpha * A'*A*x */
     void multiplyHessianAdd(double alpha, const VectorValues& x, VectorValues& y) const;
 
-    void multiplyHessianAdd(double alpha, const double* x, double* y, std::vector<size_t> keys) const;
-
-    void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
-
     /// A'*b for Jacobian
    VectorValues gradientAtZero() const;
 
-    /* ************************************************************************* */
-    virtual void gradientAtZero(double* d) const;
-
     /** Return a whitened version of the factor, i.e. with unit diagonal noise model. */
     JacobianFactor whiten() const;
 
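
Not part of the patch: a minimal sketch of how callers can use the retained VectorValues-based interface in place of the removed raw double* overloads. Only hessianDiagonal(), gradientAtZero(), and multiplyHessianAdd(alpha, x, y) appear in this diff; the JacobianFactor constructor and noiseModel::Unit::Create used below are assumed from the wider GTSAM API.

#include <gtsam/linear/JacobianFactor.h>
#include <gtsam/linear/NoiseModel.h>
#include <gtsam/linear/VectorValues.h>

using namespace gtsam;

int main() {
  // One 2x2 Jacobian block A1 for key 1, right-hand side b, unit noise model
  // (constructor and noise model assumed from GTSAM, not shown in the diff).
  Matrix A1 = (Matrix(2, 2) << 2.0, 0.0, 0.0, 3.0).finished();
  Vector b = (Vector(2) << 1.0, 1.0).finished();
  JacobianFactor factor(1, A1, b, noiseModel::Unit::Create(2));

  // Keyed per-variable results, replacing the removed double* interfaces.
  VectorValues hdiag = factor.hessianDiagonal();
  VectorValues g = factor.gradientAtZero();

  // y += alpha * A'*A*x via the retained VectorValues overload.
  VectorValues x, y;
  x.insert(1, (Vector(2) << 1.0, 1.0).finished());
  y.insert(1, Vector::Zero(2));
  factor.multiplyHessianAdd(1.0, x, y);

  hdiag.print("hessianDiagonal");
  g.print("gradientAtZero");
  y.print("y = A'*A*x");
  return 0;
}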