From 7a504f3babdc05684cc97b6605788e6524353f8a Mon Sep 17 00:00:00 2001
From: Sungtae An
Date: Wed, 12 Nov 2014 04:25:28 -0500
Subject: [PATCH] Create JacobianFactor derived class for fixed size and add
 raw memory access

---
 gtsam/slam/RegularJacobianFactor.h | 143 ++++++++++++++++++++++++++++-
 1 file changed, 140 insertions(+), 3 deletions(-)

diff --git a/gtsam/slam/RegularJacobianFactor.h b/gtsam/slam/RegularJacobianFactor.h
index 0223c66ab..a518505ca 100644
--- a/gtsam/slam/RegularJacobianFactor.h
+++ b/gtsam/slam/RegularJacobianFactor.h
@@ -1,4 +1,141 @@
-#ifndef REGULARJACOBIANFACTOR_H
-#define REGULARJACOBIANFACTOR_H
+/* ----------------------------------------------------------------------------
+
+ * GTSAM Copyright 2010, Georgia Tech Research Corporation,
+ * Atlanta, Georgia 30332-0415
+ * All Rights Reserved
+ * Authors: Frank Dellaert, et al. (see THANKS for the full author list)
+
+ * See LICENSE for the license information
+
+ * -------------------------------------------------------------------------- */
+
+/**
+ * @file RegularJacobianFactor.h
+ * @brief JacobianFactor class with fixed size blocks
+ * @author Sungtae An
+ * @date Nov 11, 2014
+ */
+
+#pragma once
+
+#include <gtsam/linear/JacobianFactor.h>
+#include <gtsam/linear/VectorValues.h>
+#include <gtsam/base/Vector.h>
+
+namespace gtsam {
+
+template<size_t D>
+class RegularJacobianFactor: public JacobianFactor {
+
+private:
+
+  typedef Eigen::Matrix<double, D, D> MatrixDD; // camera hessian block
+  typedef Eigen::Matrix<double, D, 1> VectorD;
+  // Use eigen magic to access raw memory
+  typedef Eigen::Map<VectorD> DMap;
+  typedef Eigen::Map<const VectorD> ConstDMap;
+
+public:
+
+  /** Construct an n-ary factor
+   * @tparam TERMS A container whose value type is std::pair<Key, Matrix>, specifying the
+   *         collection of keys and matrices making up the factor. */
+  template<typename TERMS>
+  RegularJacobianFactor(const TERMS& terms, const Vector& b,
+      const SharedDiagonal& model = SharedDiagonal()) :
+      JacobianFactor(terms, b, model) {
+  }
+
+  /** Constructor with an arbitrary number of keys, and where the augmented matrix is given all together
+   * instead of in block terms. Note that only the active view of the provided augmented matrix
+   * is used, and that the matrix data is copied into a newly-allocated matrix in the constructed
+   * factor.
+   */
+  template<typename KEYS>
+  RegularJacobianFactor(const KEYS& keys,
+      const VerticalBlockMatrix& augmentedMatrix,
+      const SharedDiagonal& sigmas = SharedDiagonal()) :
+      JacobianFactor(keys, augmentedMatrix, sigmas) {
+  }
+
+  /// Return the diagonal of the Hessian for this factor
+  VectorValues hessianDiagonal() const {
+    return JacobianFactor::hessianDiagonal();
+  }
+
+  /// Raw memory access version of hessianDiagonal
+  void hessianDiagonal(double* d) const {
+    // Loop over all variables in the factor
+    for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+      // Get the diagonal block, and insert its diagonal
+      VectorD dj;
+      for (size_t k = 0; k < D; ++k)
+        dj(k) = Ab_(j).col(k).squaredNorm();
+
+      DMap(d + D * j) += dj;
+    }
+  }
+
+  /// y += alpha * A'*A*x
+  void multiplyHessianAdd(double alpha, const VectorValues& x,
+      VectorValues& y) const {
+    JacobianFactor::multiplyHessianAdd(alpha, x, y);
+  }
+
+  /// Raw memory access version of multiplyHessianAdd; keys stores accumulated
+  /// offsets: variable keys_[pos] occupies [keys[keys_[pos]], keys[keys_[pos]+1])
+  void multiplyHessianAdd(double alpha, const double* x, double* y,
+      std::vector<size_t> keys) const {
+    if (empty())
+      return;
+    Vector Ax = zero(Ab_.rows());
+
+    // Just iterate over all A matrices and multiply in correct config part
+    for (size_t pos = 0; pos < size(); ++pos)
+      Ax += Ab_(pos)
+          * ConstDMap(x + keys[keys_[pos]],
+              keys[keys_[pos] + 1] - keys[keys_[pos]]);
+
+    // Deal with noise properly: whiten twice, as we are dividing by the variance
+    if (model_) {
+      model_->whitenInPlace(Ax);
+      model_->whitenInPlace(Ax);
+    }
+
+    // multiply with alpha
+    Ax *= alpha;
+
+    // Again iterate over all A matrices and insert Ai^T * e into y
+    for (size_t pos = 0; pos < size(); ++pos)
+      DMap(y + keys[keys_[pos]], keys[keys_[pos] + 1] - keys[keys_[pos]]) += Ab_(
+          pos).transpose() * Ax;
+  }
+
+  /// Raw memory access version of multiplyHessianAdd for fixed block size D
+  void multiplyHessianAdd(double alpha, const double* x, double* y) const {
+    if (empty()) return;
+    Vector Ax = zero(Ab_.rows());
+
+    // Just iterate over all A matrices and multiply in correct config part
+    for(size_t pos=0; pos<size(); ++pos)
+      Ax += Ab_(pos) * ConstDMap(x + D * keys_[pos]);
+
+    // Deal with noise properly: whiten twice, as we are dividing by the variance
+    if (model_) { model_->whitenInPlace(Ax); model_->whitenInPlace(Ax); }
+
+    // multiply with alpha
+    Ax *= alpha;
+
+    // Again iterate over all A matrices and insert Ai^T * e into y
+    for(size_t pos=0; pos<size(); ++pos)
+      DMap(y + D * keys_[pos]) += Ab_(pos).transpose() * Ax;
+  }
+
+};
+
+}
 
-#endif // REGULARJACOBIANFACTOR_H
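
Usage note (not part of the patch): the raw-memory overloads above assume a particular layout of x and y. The fixed-size overload multiplyHessianAdd(alpha, x, y) reads the variable with key k at offset D*k, while the overload taking a std::vector appears to expect an accumulated-offset table, where entry k is the position at which variable k starts and entry k+1 marks where it ends. The sketch below is a hypothetical example of driving both overloads; the block size D=3, the keys, the Jacobian blocks, and the noise sigma are all made up for illustration, and it assumes the header lands at gtsam/slam/RegularJacobianFactor.h as in this patch.

#include <gtsam/slam/RegularJacobianFactor.h>
#include <gtsam/linear/NoiseModel.h>
#include <iostream>
#include <utility>
#include <vector>

using namespace gtsam;

int main() {
  const size_t D = 3; // fixed dimension of every variable block

  // Two variables (keys 0 and 1), each with a 2x3 Jacobian block, and a 2-vector rhs
  typedef std::pair<Key, Matrix> Term;
  std::vector<Term> terms;
  terms.push_back(Term(0, (Matrix(2, 3) << 1, 0, 0, 0, 1, 0).finished()));
  terms.push_back(Term(1, (Matrix(2, 3) << 0, 0, 1, 1, 0, 0).finished()));
  Vector b = (Vector(2) << 1.0, 2.0).finished();

  RegularJacobianFactor<D> factor(terms, b, noiseModel::Isotropic::Sigma(2, 0.5));

  // Raw layout for the fixed-size overload: variable with key k lives at [D*k, D*k+D)
  double x[2 * D] = { 1, 2, 3, 4, 5, 6 };
  double y[2 * D] = { 0, 0, 0, 0, 0, 0 };

  // y += 1.0 * A' * Sigma^{-1} * A * x (the two whitenInPlace calls divide by the variance)
  factor.multiplyHessianAdd(1.0, x, y);

  // Same product through the accumulated-offset overload: offsets[k] is where
  // variable k starts in x and y, offsets[k+1] - offsets[k] its dimension
  std::vector<size_t> offsets;
  offsets.push_back(0);      // key 0 starts at 0
  offsets.push_back(D);      // key 1 starts at D
  offsets.push_back(2 * D);  // one past the end
  factor.multiplyHessianAdd(1.0, x, y, offsets);

  for (size_t i = 0; i < 2 * D; ++i)
    std::cout << y[i] << " ";
  std::cout << std::endl;
  return 0;
}

Note that both raw overloads accumulate into y rather than overwrite it, mirroring the += semantics of the VectorValues version, so the two calls above add the same Hessian-vector product twice.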