transposeMultiplyAdd provides a BLAS-style call for iterative-solver speed

release/4.3a0
Frank Dellaert 2010-01-30 23:59:29 +00:00
parent 6ef09583b9
commit 9b4ff5e099
8 changed files with 64 additions and 6 deletions

View File

@@ -60,6 +60,14 @@ namespace gtsam {
}
/* ************************************************************************* */
// y += alpha*inv(R')*A'*e
void BayesNetPreconditioner::transposeMultiplyAdd(double alpha,
const Errors& e, VectorConfig& y) const {
VectorConfig x = Ab_ ^ e; // x = A'*e
y += alpha * gtsam::backSubstituteTranspose(Rd_, x); // TODO avoid temp
}
/* ************************************************************************* */
} // namespace gtsam
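
Note: the signature follows the BLAS convention of accumulating into an existing output (gemv with trans='T' and beta = 1) rather than returning a fresh vector, which is what makes it attractive inside an iterative solver. A minimal sketch of the same y += alpha*A'*e pattern, written against Eigen purely for illustration (the commit itself operates on GTSAM's VectorConfig, not these types):

#include <Eigen/Dense>

// Illustration only: the BLAS-style accumulate-in-place pattern.
// noalias() lets Eigen write straight into y without a temporary.
void transposeMultiplyAdd(double alpha, const Eigen::MatrixXd& A,
                          const Eigen::VectorXd& e, Eigen::VectorXd& y) {
  y.noalias() += alpha * (A.transpose() * e);
}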

View File

@@ -58,6 +58,9 @@ namespace gtsam {
/** Apply operator A' */
VectorConfig operator^(const Errors& e) const;
/** BLAS level 2 equivalent y += alpha*inv(R')*A'*e */
void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const;
};
} // namespace gtsam

View File

@@ -86,6 +86,16 @@ VectorConfig GaussianFactorGraph::operator^(const Errors& e) const {
return x;
}
/* ************************************************************************* */
// x += alpha*A'*e
void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e,
VectorConfig& x) const {
// For each factor add the gradient contribution
Errors::const_iterator it = e.begin();
BOOST_FOREACH(sharedFactor Ai,factors_)
x += (*Ai)^(alpha*(*(it++)));
}
/* ************************************************************************* */
VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
const GaussianFactorGraph& A = *this;
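
Note: the loop exploits the block structure of the factor graph. With the error vector e partitioned into one slice e_i per factor, A'*e = sum_i A_i'*e_i, so each factor adds its contribution A_i'*(alpha*e_i) independently. A hedged sketch of that decomposition (block rows in a std::vector; names are stand-ins, not the GTSAM API):

#include <cstddef>
#include <vector>
#include <Eigen/Dense>

// Sketch: x += alpha*A'*e accumulated factor by factor, where Ai[i] is the
// block row of factor i and ei[i] the matching slice of e.
void transposeMultiplyAdd(double alpha,
                          const std::vector<Eigen::MatrixXd>& Ai,
                          const std::vector<Eigen::VectorXd>& ei,
                          Eigen::VectorXd& x) {
  for (std::size_t i = 0; i < Ai.size(); ++i)
    x.noalias() += alpha * (Ai[i].transpose() * ei[i]);  // x += alpha*A_i'*e_i
}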

View File

@@ -91,9 +91,12 @@ namespace gtsam {
/* In-place version e <- A*x that takes an iterator. */
void multiplyInPlace(const VectorConfig& x, const Errors::iterator& e) const;
- /** return A^x */
+ /** return A^e */
VectorConfig operator^(const Errors& e) const;
/** x += alpha*A'*e */
void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& x) const;
/**
* Calculate Gradient of A^(A*x-b) for a given config
* @param x: VectorConfig specifying where to calculate gradient
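
(For context: A'(A*x - b) is the textbook gradient of the least-squares objective 0.5*||A*x - b||^2, so gradient() composes the forward product A*x - b with the transpose product supplied by operator^. The same identity in dense form, Eigen again only for illustration:)

#include <Eigen/Dense>

// Textbook identity: grad 0.5*||A*x - b||^2 = A'*(A*x - b).
Eigen::VectorXd gradient(const Eigen::MatrixXd& A, const Eigen::VectorXd& b,
                         const Eigen::VectorXd& x) {
  return A.transpose() * (A * x - b);
}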

View File

@@ -92,14 +92,14 @@ namespace gtsam {
// Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
VectorConfig SubgraphPreconditioner::operator^(const Errors& e) const {
- VectorConfig y1;
+ VectorConfig y;
// Use BayesNet order to remove y contributions in order
Errors::const_iterator it = e.begin();
BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
const Symbol& j = cg->key();
const Vector& ej = *(it++);
- y1.insert(j,ej);
+ y.insert(j,ej);
}
// create e2 with what's left of e
@@ -109,9 +109,34 @@
// get A2 part,
VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2
- VectorConfig y2 = gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
+ y += gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
- return y1 + y2;
+ return y;
}
/* ************************************************************************* */
// y += alpha*A'*e
// TODO avoid code duplication
void SubgraphPreconditioner::transposeMultiplyAdd
(double alpha, const Errors& e, VectorConfig& y) const {
// Use BayesNet order to remove y contributions in order
Errors::const_iterator it = e.begin();
BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
const Symbol& j = cg->key();
const Vector& ej = *(it++);
Vector& yj = y.getReference(j);
axpy(alpha,ej,yj);
}
// create e2 with what's left of e
Errors e2;
while (it != e.end())
e2.push_back(*(it++));
// get A2 part,
VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2
y += alpha * gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
}
/* ************************************************************************* */
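
Note: compared to operator^ above, the in-place variant avoids building the intermediate configs: the e1 part is folded into y with a BLAS-1 axpy (y_j += alpha*e_j) through getReference, and only the backsubstituted A2'*e2 part still allocates. A sketch of that keyed axpy idiom (std::map and the names are stand-ins, not GTSAM types):

#include <map>
#include <string>
#include <Eigen/Dense>

// Sketch: for each key j with an error slice e_j, update y[j] += alpha*e_j in
// place, the same pattern as axpy(alpha, ej, y.getReference(j)) above.
void axpyInPlace(double alpha,
                 const std::map<std::string, Eigen::VectorXd>& e,
                 std::map<std::string, Eigen::VectorXd>& y) {
  for (const auto& kv : e)
    y.at(kv.first) += alpha * kv.second;
}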

View File

@@ -70,6 +70,9 @@ namespace gtsam {
/** Apply operator A' */
VectorConfig operator^(const Errors& e) const;
/** y += alpha*A'*e */
void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const;
/** print the object */
void print(const std::string& s = "SubgraphPreconditioner") const;
};

View File

@@ -54,7 +54,8 @@ namespace gtsam {
if (k%reset==0)
g = Ab.gradient(x);
else
- axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
+ // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
+ Ab.transposeMultiplyAdd(alpha, Ad, g);
// check for convergence
double gamma = dot(g, g);
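
Note: this call site is the payoff of the commit. In the conjugate-gradient inner loop, the old g += alpha*(Ab^Ad) first materialized the full A'*(A*d) product as a temporary VectorConfig and then performed the axpy; the fused call accumulates straight into g on every iteration. A before/after sketch under the same Eigen stand-in as above:

#include <Eigen/Dense>

// Sketch of the inner-loop gradient update; A stands in for Ab, Ad for A*d.
void gradientUpdate(const Eigen::MatrixXd& A, const Eigen::VectorXd& Ad,
                    double alpha, Eigen::VectorXd& g) {
  // Before: an explicit temporary, then axpy.
  //   Eigen::VectorXd tmp = A.transpose() * Ad;
  //   g += alpha * tmp;
  // After: one fused accumulate, no temporary.
  g.noalias() += alpha * (A.transpose() * Ad);
}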

View File

@@ -61,6 +61,11 @@ namespace gtsam {
return A_ ^ e;
}
/** x += alpha*A_^T*e */
inline void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const {
x += alpha * (A_ ^ e); // parenthesized so only the vector A_'*e is scaled, not A_
}
/**
* Print with optional string
*/
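
Finally, a compilable usage sketch of the accumulate semantics shared by all of these overloads: calling the primitive twice keeps adding into the same output. The free function and the values are illustrative only:

#include <iostream>
#include <Eigen/Dense>

// Free-function stand-in for the member above, illustrative only.
static void transposeMultiplyAdd(double alpha, const Eigen::MatrixXd& A,
                                 const Eigen::VectorXd& e, Eigen::VectorXd& x) {
  x.noalias() += alpha * (A.transpose() * e);
}

int main() {
  Eigen::MatrixXd A(2, 2);
  A << 1, 2,
       3, 4;
  Eigen::VectorXd e(2);
  e << 1, 1;
  Eigen::VectorXd x = Eigen::VectorXd::Zero(2);
  transposeMultiplyAdd(1.0, A, e, x);  // x  = A'*e     -> [4, 6]
  transposeMultiplyAdd(0.5, A, e, x);  // x += 0.5*A'*e -> [6, 9]
  std::cout << x.transpose() << std::endl;  // prints: 6 9
  return 0;
}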