Add gradientAtZero (Raw memory access)
parent c6faa784e2
commit 174f60762a
@@ -28,6 +28,13 @@ namespace gtsam {
 template<size_t D>
 class RegularJacobianFactor: public JacobianFactor {

+private:
+
+  /** Use eigen magic to access raw memory */
+  typedef Eigen::Matrix<double, D, 1> DVector;
+  typedef Eigen::Map<DVector> DMap;
+  typedef Eigen::Map<const DVector> ConstDMap;
+
 public:

   /** Construct an n-ary factor
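Note: the new private typedefs are what the later raw-memory methods build on. DVector is a fixed-size D-by-1 vector, and DMap / ConstDMap reinterpret a plain double* as such a vector without copying. A minimal standalone sketch of that pattern (plain Eigen only; the buffer layout and names below are chosen here for illustration, not taken from the commit):

// Sketch: viewing raw memory as fixed-size Eigen vectors, no copies involved.
#include <Eigen/Core>
#include <iostream>

int main() {
  constexpr int D = 3;                          // fixed block size, like the template parameter
  typedef Eigen::Matrix<double, D, 1> DVector;  // fixed-size column vector
  typedef Eigen::Map<DVector> DMap;             // writable view over raw memory
  typedef Eigen::Map<const DVector> ConstDMap;  // read-only view

  double buffer[2 * D] = {1, 2, 3, 4, 5, 6};    // two contiguous blocks of size D

  ConstDMap x0(buffer);      // block 0: a view, not a copy
  DMap x1(buffer + D);       // block 1: writable view
  x1 += 2.0 * x0;            // writes straight into buffer[3..5]

  std::cout << x1.transpose() << std::endl;     // prints "6 9 12"
  return 0;
}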
@@ -50,60 +57,57 @@ public:
       JacobianFactor(keys, augmentedMatrix, sigmas) {
   }

-  /// y += alpha * A'*A*x
+  /** y += alpha * A'*A*x */
   void multiplyHessianAdd(double alpha, const VectorValues& x,
       VectorValues& y) const {
     JacobianFactor::multiplyHessianAdd(alpha, x, y);
   }

-  // Note: this is not assuming a fixed dimension for the variables,
-  // but requires the vector accumulatedDims to tell the dimension of
-  // each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
-  // then accumulatedDims is [0 3 9 11 13]
-  // NOTE: size of accumulatedDims is size of keys + 1!!
+  /** Raw memory access version of multiplyHessianAdd
+   * Note: this is not assuming a fixed dimension for the variables,
+   * but requires the vector accumulatedDims to tell the dimension of
+   * each variable: e.g.: x0 has dim 3, x2 has dim 6, x3 has dim 2,
+   * then accumulatedDims is [0 3 9 11 13]
+   * NOTE: size of accumulatedDims is size of keys + 1!! */
   void multiplyHessianAdd(double alpha, const double* x, double* y,
       const std::vector<size_t>& accumulatedDims) const {

-    // Use eigen magic to access raw memory
-    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> DVector;
-    typedef Eigen::Map<DVector> DMap;
-    typedef Eigen::Map<const DVector> ConstDMap;
+    /// Use eigen magic to access raw memory
+    typedef Eigen::Matrix<double, Eigen::Dynamic, 1> VectorD;
+    typedef Eigen::Map<VectorD> MapD;
+    typedef Eigen::Map<const VectorD> ConstMapD;

     if (empty())
       return;
     Vector Ax = zero(Ab_.rows());

-    // Just iterate over all A matrices and multiply in correct config part (looping over keys)
-    // E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
-    // Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
+    /// Just iterate over all A matrices and multiply in correct config part (looping over keys)
+    /// E.g.: Jacobian A = [A0 A1 A2] multiplies x = [x0 x1 x2]'
+    /// Hence: Ax = A0 x0 + A1 x1 + A2 x2 (hence we loop over the keys and accumulate)
     for (size_t pos = 0; pos < size(); ++pos)
     {
       Ax += Ab_(pos)
-          * ConstDMap(x + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]);
+          * ConstMapD(x + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]);
     }
-    // Deal with noise properly, need to Double* whiten as we are dividing by variance
+    /// Deal with noise properly, need to Double* whiten as we are dividing by variance
     if (model_) {
       model_->whitenInPlace(Ax);
       model_->whitenInPlace(Ax);
     }

-    // multiply with alpha
+    /// multiply with alpha
     Ax *= alpha;

-    // Again iterate over all A matrices and insert Ai^T into y
+    /// Again iterate over all A matrices and insert Ai^T into y
     for (size_t pos = 0; pos < size(); ++pos){
-      DMap(y + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]) += Ab_(
+      MapD(y + accumulatedDims[keys_[pos]], accumulatedDims[keys_[pos] + 1] - accumulatedDims[keys_[pos]]) += Ab_(
           pos).transpose() * Ax;
     }
   }

+  /** Raw memory access version of multiplyHessianAdd */
   void multiplyHessianAdd(double alpha, const double* x, double* y) const {

-    // Use eigen magic to access raw memory
-    typedef Eigen::Matrix<double, D, 1> DVector;
-    typedef Eigen::Map<DVector> DMap;
-    typedef Eigen::Map<const DVector> ConstDMap;
-
     if (empty()) return;
     Vector Ax = zero(Ab_.rows());

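Note: in the variable-dimension overload, accumulatedDims carries the start offset of each variable's block inside the flat x and y arrays, so it is the exclusive prefix sum of the per-variable dimensions and has one more entry than there are keys. A small sketch of how such a vector could be built (the helper name is ours, not GTSAM's); with dims {3, 6, 2, 2} it reproduces the [0 3 9 11 13] from the comment above:

// Sketch: accumulatedDims as the exclusive prefix sum of variable dimensions.
#include <cstddef>
#include <iostream>
#include <numeric>
#include <vector>

std::vector<size_t> buildAccumulatedDims(const std::vector<size_t>& dims) {
  std::vector<size_t> acc(dims.size() + 1, 0);
  std::partial_sum(dims.begin(), dims.end(), acc.begin() + 1);
  return acc;
}

int main() {
  std::vector<size_t> dims = {3, 6, 2, 2};
  for (size_t a : buildAccumulatedDims(dims))
    std::cout << a << ' ';                  // prints: 0 3 9 11 13
  std::cout << '\n';
  // The slice of x belonging to the variable with key k starts at
  // x + acc[k] and has length acc[k + 1] - acc[k], which is exactly how
  // the raw multiplyHessianAdd above indexes into x and y.
  return 0;
}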
@@ -122,19 +126,16 @@ public:
       DMap(y + D * keys_[pos]) += Ab_(pos).transpose() * Ax;
     }

-  /// Return the diagonal of the Hessian for this factor
-  VectorValues hessianDiagonal() const {
-    return JacobianFactor::hessianDiagonal();
-  }
+  // /// Return the diagonal of the Hessian for this factor
+  // VectorValues hessianDiagonal() const {
+  //   return JacobianFactor::hessianDiagonal();
+  // }

-  /// Raw memory access version of hessianDiagonal
-  /* ************************************************************************* */
-  // TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+  /** Raw memory access version of hessianDiagonal
+   * TODO: currently assumes all variables of the same size D (templated) and keys arranged from 0 to n
+   *
+   */
   void hessianDiagonal(double* d) const {
-    // Use eigen magic to access raw memory
-    typedef Eigen::Matrix<double, D, 1> DVector;
-    typedef Eigen::Map<DVector> DMap;
-
     // Loop over all variables in the factor
     for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
       // Get the diagonal block, and insert its diagonal
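Note: the TODO above pins down the calling convention of the raw overloads: all variables share the fixed dimension D and keys are assumed to run from 0 to n-1, so the caller owns one flat buffer of n*D doubles and block j of the result lands at d + D*j. A sketch of that layout (our reading of the code, not part of the commit; the call on the factor is hypothetical and commented out):

// Sketch: flat output buffer for the raw hessianDiagonal / gradientAtZero calls.
#include <Eigen/Core>
#include <iostream>
#include <vector>

int main() {
  constexpr int D = 3;                // per-variable dimension (the template parameter)
  const std::size_t n = 4;            // number of variables, keys assumed to be 0..n-1
  std::vector<double> d(n * D, 0.0);  // flat buffer, zeroed: the raw methods accumulate with +=

  // factor.hessianDiagonal(d.data());   // hypothetical call on a RegularJacobianFactor<D>

  // Block j of the result is read back as a fixed-size view at d + D*j:
  const std::size_t j = 2;
  Eigen::Map<const Eigen::Matrix<double, D, 1> > block_j(d.data() + D * j);
  std::cout << block_j.transpose() << std::endl;  // "0 0 0" until the factor fills it in
  return 0;
}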
@@ -152,12 +153,28 @@ public:
     }
   }

+  /** // Gradient is really -A'*b / sigma^2 */
   VectorValues gradientAtZero() const {
     return JacobianFactor::gradientAtZero();
   }

+  /** Raw memory access version of gradientAtZero */
   void gradientAtZero(double* d) const {
-    //throw std::runtime_error("gradientAtZero not implemented for Jacobian factor");
+    // Get vector b not weighted
+    Vector b = getb();
+    Vector b_sigma = model_ ? (model_->whiten(b)*model_->whiten(b)) : b;
+
+    // gradient -= A'*b/sigma^2
+    // Loop over all variables in the factor
+    for (DenseIndex j = 0; j < (DenseIndex) size(); ++j) {
+      // Get the diagonal block, and insert its diagonal
+      DVector dj;
+      for (size_t k = 0; k < D; ++k){
+        Vector column_k = Ab_(j).col(k);
+        dj(k) = -1.0*dot(b_sigma,column_k);
+      }
+      DMap(d + D * j) += dj;
+    }
   }

 };
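Note: the "Gradient is really -A'*b / sigma^2" comment refers to the gradient of the factor's error 0.5 * ||A x - b||^2 / sigma^2 evaluated at x = 0, which is -A' * b / sigma^2. A dense Eigen sanity sketch of that identity (toy A, b, and sigma chosen here; this is our illustration of the formula, not code from the commit):

// Sketch: gradient at zero equals -A'*b/sigma^2, i.e. -A' applied to b whitened twice.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A(4, 3);
  A << 1, 0, 2,
       0, 1, 0,
       3, 0, 1,
       0, 2, 1;
  Eigen::VectorXd b(4);
  b << 1, 2, 3, 4;
  const double sigma = 0.5;

  // Gradient at zero, written directly from the formula.
  Eigen::VectorXd g = -A.transpose() * b / (sigma * sigma);

  // Same thing via "whitening" (dividing by sigma) twice, mirroring the two
  // whitenInPlace calls used for the Hessian product earlier in the diff.
  Eigen::VectorXd b_whitened_twice = (b / sigma) / sigma;
  Eigen::VectorXd g2 = -A.transpose() * b_whitened_twice;

  std::cout << (g - g2).norm() << std::endl;  // ~0: the two expressions agree
  return 0;
}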