transposeMultiplyAdd provides BLAS-style call for iterative speed

parent 6ef09583b9
commit 9b4ff5e099
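For orientation: every new method below implements the BLAS level-2 style update y <- y + alpha * A' * e, accumulating directly into the output instead of first materialising A'*e and then adding it on each solver iteration. A minimal standalone sketch of that pattern, using plain std::vector and purely illustrative names (this is not the gtsam API):

#include <cstddef>
#include <vector>

// y += alpha * A' * e for a dense m x n matrix A stored row-major.
// Hypothetical helper for illustration; the gtsam versions operate on
// factor graphs / Bayes nets rather than a dense matrix.
void transposeMultiplyAdd(double alpha, const std::vector<double>& A,
                          std::size_t m, std::size_t n,
                          const std::vector<double>& e, std::vector<double>& y) {
  for (std::size_t i = 0; i < m; ++i)        // row i of A contributes e[i] * A(i,:)
    for (std::size_t j = 0; j < n; ++j)
      y[j] += alpha * A[i * n + j] * e[i];   // accumulate in place, no temporary
}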
@@ -60,6 +60,14 @@ namespace gtsam {
   }
 
   /* ************************************************************************* */
+  // y += alpha*inv(R')*A'*e
+  void BayesNetPreconditioner::transposeMultiplyAdd(double alpha,
+      const Errors& e, VectorConfig& y) const {
+    VectorConfig x = Ab_ ^ e; // x = A'*e2
+    y += alpha * gtsam::backSubstituteTranspose(Rd_, x); // TODO avoid temp
+  }
+
+  /* ************************************************************************* */
 
 } // namespace gtsam
@@ -58,6 +58,9 @@ namespace gtsam {
     /** Apply operator A' */
     VectorConfig operator^(const Errors& e) const;
 
+    /** BLAS level 2 equivalent y += alpha*inv(R')*A'*e */
+    void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const;
+
   };
 
 } // namespace gtsam
@@ -86,6 +86,16 @@ VectorConfig GaussianFactorGraph::operator^(const Errors& e) const {
   return x;
 }
 
+/* ************************************************************************* */
+// x += alpha*A'*e
+void GaussianFactorGraph::transposeMultiplyAdd(double alpha, const Errors& e,
+    VectorConfig& x) const {
+  // For each factor add the gradient contribution
+  Errors::const_iterator it = e.begin();
+  BOOST_FOREACH(sharedFactor Ai,factors_)
+    x += (*Ai)^(alpha*(*(it++)));
+}
+
 /* ************************************************************************* */
 VectorConfig GaussianFactorGraph::gradient(const VectorConfig& x) const {
   const GaussianFactorGraph& A = *this;
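The factor-graph version distributes the same update over the factors: the stacked Jacobian A is made of row blocks Ai, one per factor, so A'*e = sum_i Ai'*ei and each factor adds its own contribution to x. A hedged dense sketch of that block accumulation (Eigen is used purely for illustration; the block and error containers here are assumptions, not gtsam types):

#include <cstddef>
#include <vector>
#include <Eigen/Dense>

// x += alpha * A' * e where A is stacked from row blocks A_i (all with the
// same column count) and e is split into matching pieces e_i.
void transposeMultiplyAdd(double alpha,
                          const std::vector<Eigen::MatrixXd>& blocks,
                          const std::vector<Eigen::VectorXd>& e,
                          Eigen::VectorXd& x) {
  for (std::size_t i = 0; i < blocks.size(); ++i)
    x.noalias() += alpha * blocks[i].transpose() * e[i];  // one factor's contribution
}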
@@ -91,9 +91,12 @@ namespace gtsam {
     /* In-place version e <- A*x that takes an iterator. */
     void multiplyInPlace(const VectorConfig& x, const Errors::iterator& e) const;
 
-    /** return A^x */
+    /** return A^e */
     VectorConfig operator^(const Errors& e) const;
 
+    /** x += alpha*A'*e */
+    void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& x) const;
+
     /**
      * Calculate Gradient of A^(A*x-b) for a given config
      * @param x: VectorConfig specifying where to calculate gradient
@@ -92,14 +92,14 @@ namespace gtsam {
   // Apply operator A', A'*e = [I inv(R1')*A2']*e = e1 + inv(R1')*A2'*e2
   VectorConfig SubgraphPreconditioner::operator^(const Errors& e) const {
 
-    VectorConfig y1;
+    VectorConfig y;
 
     // Use BayesNet order to remove y contributions in order
     Errors::const_iterator it = e.begin();
     BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
       const Symbol& j = cg->key();
       const Vector& ej = *(it++);
-      y1.insert(j,ej);
+      y.insert(j,ej);
     }
 
     // create e2 with what's left of e
@@ -109,9 +109,34 @@ namespace gtsam {
 
     // get A2 part,
     VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2
-    VectorConfig y2 = gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
+    y += gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
 
-    return y1 + y2;
+    return y;
+  }
+
+  /* ************************************************************************* */
+  // y += alpha*A'*e
+  // TODO avoid code duplication
+  void SubgraphPreconditioner::transposeMultiplyAdd
+  (double alpha, const Errors& e, VectorConfig& y) const {
+
+    // Use BayesNet order to remove y contributions in order
+    Errors::const_iterator it = e.begin();
+    BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
+      const Symbol& j = cg->key();
+      const Vector& ej = *(it++);
+      Vector& yj = y.getReference(j);
+      axpy(alpha,ej,yj);
+    }
+
+    // create e2 with what's left of e
+    Errors e2;
+    while (it != e.end())
+      e2.push_back(*(it++));
+
+    // get A2 part,
+    VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2
+    y += alpha * gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
   }
 
   /* ************************************************************************* */
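In the subgraph case the error splits as e = [e1; e2] and the transposed operator is A'*e = e1 + inv(R1')*A2'*e2, so the new method adds alpha*e1 straight into y on the spanning-tree variables and then back-substitutes through R1' for the A2 part. A dense Eigen sketch under assumed names (illustrative only, not the gtsam types):

#include <Eigen/Dense>

// y += alpha * (e1 + inv(R1') * A2' * e2), with R1 an n x n upper-triangular
// factor, A2 an m x n block, e1 of size n, e2 of size m. Hypothetical helper.
void transposeMultiplyAdd(double alpha,
                          const Eigen::MatrixXd& R1,
                          const Eigen::MatrixXd& A2,
                          const Eigen::VectorXd& e1,
                          const Eigen::VectorXd& e2,
                          Eigen::VectorXd& y) {
  y += alpha * e1;                                       // identity block
  Eigen::VectorXd x = A2.transpose() * e2;               // x = A2' * e2
  Eigen::VectorXd t = R1.transpose()
                        .triangularView<Eigen::Lower>()
                        .solve(x);                       // inv(R1') * x by back-substitution
  y += alpha * t;
}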
@@ -70,6 +70,9 @@ namespace gtsam {
     /** Apply operator A' */
     VectorConfig operator^(const Errors& e) const;
 
+    /** y += alpha*A'*e */
+    void transposeMultiplyAdd(double alpha, const Errors& e, VectorConfig& y) const;
+
     /** print the object */
     void print(const std::string& s = "SubgraphPreconditioner") const;
   };
@@ -54,7 +54,8 @@ namespace gtsam {
       if (k%reset==0)
         g = Ab.gradient(x);
       else
-        axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
+        // axpy(alpha, Ab ^ Ad, g); // g += alpha*(Ab^Ad)
+        Ab.transposeMultiplyAdd(alpha, Ad, g);
 
       // check for convergence
       double gamma = dot(g, g);
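This hunk is where the "iterative speed" argument lands: the old line first built the full vector Ab ^ Ad (that is, A'*Ad) as a temporary and then axpy'd it into g once per iteration, while the new call folds the scaling and addition into a single in-place accumulation. A minimal Eigen sketch of the two forms of that gradient update (the names mirror the snippet above but are otherwise assumptions):

#include <Eigen/Dense>

// One gradient update g += alpha * A' * Ad from a conjugate-gradient-style
// iteration, written both ways for comparison.
void updateGradient(double alpha, const Eigen::MatrixXd& A,
                    const Eigen::VectorXd& Ad, Eigen::VectorXd& g) {
  // Old style: form the temporary t = A' * Ad, then add alpha * t.
  // Eigen::VectorXd t = A.transpose() * Ad;
  // g += alpha * t;

  // New style: accumulate directly into g, no named temporary.
  g.noalias() += alpha * A.transpose() * Ad;
}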
@@ -61,6 +61,11 @@ namespace gtsam {
       return A_ ^ e;
     }
 
+    /** x += alpha* A_^T */
+    inline void transposeMultiplyAdd(double alpha, const Vector& e, Vector& x) const {
+      x += alpha * A_ ^ e;
+    }
+
     /**
      * Print with optional string
      */