Added a gradientAtZero overload with raw memory access
parent
67cfe5ea66
commit
37b750411f
|
|
@ -130,6 +130,9 @@ namespace gtsam {
|
||||||
/// A'*b for Jacobian, eta for Hessian
|
/// A'*b for Jacobian, eta for Hessian
|
||||||
virtual VectorValues gradientAtZero() const = 0;
|
virtual VectorValues gradientAtZero() const = 0;
|
||||||
|
|
||||||
|
/// A'*b for Jacobian, eta for Hessian (raw memory version)
|
||||||
|
virtual void gradientAtZero(double* d) const = 0;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
/** Serialization function */
|
/** Serialization function */
|
||||||
friend class boost::serialization::access;
|
friend class boost::serialization::access;
|
||||||
|
|
|
||||||
|
|
@ -257,7 +257,7 @@ namespace gtsam {
|
||||||
* @param [output] g A VectorValues to store the gradient, which must be preallocated,
|
* @param [output] g A VectorValues to store the gradient, which must be preallocated,
|
||||||
* see allocateVectorValues
|
* see allocateVectorValues
|
||||||
* @return The gradient as a VectorValues */
|
* @return The gradient as a VectorValues */
|
||||||
VectorValues gradientAtZero() const;
|
virtual VectorValues gradientAtZero() const;
|
||||||
|
|
||||||
/** Optimize along the gradient direction, with a closed-form computation to perform the line
|
/** Optimize along the gradient direction, with a closed-form computation to perform the line
|
||||||
* search. The gradient is computed about \f$ \delta x=0 \f$.
|
* search. The gradient is computed about \f$ \delta x=0 \f$.
|
||||||
|
|
|
||||||
|
|
@ -600,6 +600,23 @@ VectorValues HessianFactor::gradientAtZero() const {
|
||||||
return g;
|
return g;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
// TODO: currently assumes all variables of the same size 9 and keys arranged from 0 to n
|
||||||
|
void HessianFactor::gradientAtZero(double* d) const {
|
||||||
|
|
||||||
|
// Use eigen magic to access raw memory
|
||||||
|
typedef Eigen::Matrix<double, 9, 1> DVector;
|
||||||
|
typedef Eigen::Map<DVector> DMap;
|
||||||
|
|
||||||
|
// Loop over all variables in the factor
|
||||||
|
for (DenseIndex pos = 0; pos < (DenseIndex)size(); ++pos) {
|
||||||
|
Key j = keys_[pos];
|
||||||
|
// Get the diagonal block, and insert its diagonal
|
||||||
|
DVector dj = -info_(pos,size()).knownOffDiagonal();
|
||||||
|
DMap(d + 9 * j) += dj;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
std::pair<boost::shared_ptr<GaussianConditional>, boost::shared_ptr<HessianFactor> >
|
||||||
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
|
EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
|
||||||
|
|
|
||||||
|
|
@ -387,6 +387,8 @@ namespace gtsam {
|
||||||
/// eta for Hessian
|
/// eta for Hessian
|
||||||
VectorValues gradientAtZero() const;
|
VectorValues gradientAtZero() const;
|
||||||
|
|
||||||
|
virtual void gradientAtZero(double* d) const;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
* Densely partially eliminate with Cholesky factorization. JacobianFactors are
|
||||||
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
* left-multiplied with their transpose to form the Hessian using the conversion constructor
|
||||||
|
|
|
||||||
|
|
@ -573,6 +573,11 @@ VectorValues JacobianFactor::gradientAtZero() const {
|
||||||
return g;
|
return g;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
void JacobianFactor::gradientAtZero(double* d) const {
|
||||||
|
throw std::runtime_error("gradientAtZero not implemented for Jacobian factor");
|
||||||
|
}
|
||||||
|
|
||||||
/* ************************************************************************* */
|
/* ************************************************************************* */
|
||||||
pair<Matrix, Vector> JacobianFactor::jacobian() const {
|
pair<Matrix, Vector> JacobianFactor::jacobian() const {
|
||||||
pair<Matrix, Vector> result = jacobianUnweighted();
|
pair<Matrix, Vector> result = jacobianUnweighted();
|
||||||
|
|
|
||||||
|
|
@ -286,6 +286,9 @@ namespace gtsam {
|
||||||
/// A'*b for Jacobian
|
/// A'*b for Jacobian
|
||||||
VectorValues gradientAtZero() const;
|
VectorValues gradientAtZero() const;
|
||||||
|
|
||||||
|
/* ************************************************************************* */
|
||||||
|
virtual void gradientAtZero(double* d) const;
|
||||||
|
|
||||||
/** Return a whitened version of the factor, i.e. with unit diagonal noise model. */
|
/** Return a whitened version of the factor, i.e. with unit diagonal noise model. */
|
||||||
JacobianFactor whiten() const;
|
JacobianFactor whiten() const;
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -426,6 +426,28 @@ public:
|
||||||
return g;
|
return g;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Calculate gradient, which is -F'Q*b, see paper - RAW MEMORY ACCESS
|
||||||
|
*/
|
||||||
|
void gradientAtZero(double* d) const {
|
||||||
|
|
||||||
|
// Use eigen magic to access raw memory
|
||||||
|
typedef Eigen::Matrix<double, D, 1> DVector;
|
||||||
|
typedef Eigen::Map<DVector> DMap;
|
||||||
|
|
||||||
|
// calculate Q*b
|
||||||
|
e1.resize(size());
|
||||||
|
e2.resize(size());
|
||||||
|
for (size_t k = 0; k < size(); k++)
|
||||||
|
e1[k] = b_.segment < 2 > (2 * k);
|
||||||
|
projectError(e1, e2);
|
||||||
|
|
||||||
|
for (size_t k = 0; k < size(); ++k) { // for each camera in the factor
|
||||||
|
Key j = keys_[k];
|
||||||
|
DMap(d + D * j) += -Fblocks_[k].second.transpose() * e2[k];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
};
|
};
|
||||||
// ImplicitSchurFactor
|
// ImplicitSchurFactor
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue