template now exposed in iterative.h, and has its own implementation file
parent 5ac304aff3
commit 0c0b73042b
@@ -100,7 +100,7 @@ testBinaryBayesNet_SOURCES = testBinaryBayesNet.cpp
 testBinaryBayesNet_LDADD = libgtsam.la

 # Gaussian inference
-headers += GaussianFactorSet.h
+headers += GaussianFactorSet.h iterative-inl.h
 sources += Errors.cpp VectorConfig.cpp GaussianFactor.cpp GaussianFactorGraph.cpp GaussianConditional.cpp GaussianBayesNet.cpp iterative.cpp
 check_PROGRAMS += testVectorConfig testGaussianFactor testGaussianFactorGraph testGaussianConditional testGaussianBayesNet testIterative
 testVectorConfig_SOURCES = testVectorConfig.cpp
@@ -0,0 +1,66 @@
+/*
+ * iterative-inl.h
+ * @brief Iterative methods, template implementation
+ * @author Frank Dellaert
+ * Created on: Dec 28, 2009
+ */
+
+#include "GaussianFactorGraph.h"
+#include "iterative.h"
+
+using namespace std;
+
+namespace gtsam {
+
+  /* ************************************************************************* */
+  template<class S, class V, class E>
+  V conjugateGradients(const S& Ab, V x, bool verbose, double epsilon,
+      size_t maxIterations, bool steepest = false) {
+
+    if (maxIterations == 0) maxIterations = dim(x) * (steepest ? 10 : 1);
+
+    // Start with g0 = A'*(A*x0-b), d0 = - g0
+    // i.e., first step is in direction of negative gradient
+    V g = gradient(Ab, x);
+    V d = -g;
+    double dotg0 = dot(g, g), prev_dotg = dotg0;
+    double threshold = epsilon * epsilon * dotg0;
+
+    if (verbose) cout << "CG: epsilon = " << epsilon << ", maxIterations = "
+        << maxIterations << ", ||g0||^2 = " << dotg0 << ", threshold = "
+        << threshold << endl;
+
+    // loop maxIterations times
+    for (size_t k = 0; k < maxIterations; k++) {
+
+      // calculate optimal step-size
+      E Ad = Ab * d;
+      double alpha = -dot(d, g) / dot(Ad, Ad);
+
+      // do step in new search direction
+      x = x + alpha * d;
+
+      // update gradient
+      g = g + alpha * (Ab ^ Ad);
+
+      // check for convergence
+      double dotg = dot(g, g);
+      if (verbose) cout << "iteration " << k << ": alpha = " << alpha
+          << ", dotg = " << dotg << endl;
+      if (dotg < threshold) break;
+
+      // calculate new search direction
+      if (steepest)
+        d = -g;
+      else {
+        double beta = dotg / prev_dotg;
+        prev_dotg = dotg;
+        d = -g + beta * d;
+      }
+    }
+    return x;
+  }
+
+  /* ************************************************************************* */
+
+} // namespace gtsam
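The template added above relies only on free functions supplied by its type parameters: gradient(Ab,x), Ab*d, Ab^e, dot, dim, and ordinary vector arithmetic. As a sanity check of the update formulas it uses (alpha = -d'g/||Ad||^2, x <- x + alpha*d, g <- g + alpha*A'*(A*d), Fletcher-Reeves beta = ||g_new||^2/||g_old||^2), here is a standalone sketch, not GTSAM code, that runs the same recipe on a made-up 2x2 least-squares problem; the matrix A, the right-hand side b, and every name in it are invented for illustration.

// Standalone sketch (not GTSAM code): the CG recipe from iterative-inl.h written
// out for an invented 2x2 least-squares problem min ||A*x - b||^2, so the
// step-size and direction updates can be checked by hand.
#include <iostream>

typedef double Vec2[2];

double dot2(const Vec2 a, const Vec2 b) { return a[0] * b[0] + a[1] * b[1]; }

int main() {
  const double A[2][2] = { { 2.0, 0.0 }, { 1.0, 1.0 } }; // invented system
  const Vec2 b = { 2.0, 4.0 };                           // exact solution x = (1, 3)
  Vec2 x = { 0.0, 0.0 };

  // g0 = A'*(A*x0 - b), d0 = -g0, just as in the template
  Vec2 r = { A[0][0] * x[0] + A[0][1] * x[1] - b[0], A[1][0] * x[0] + A[1][1] * x[1] - b[1] };
  Vec2 g = { A[0][0] * r[0] + A[1][0] * r[1], A[0][1] * r[0] + A[1][1] * r[1] };
  Vec2 d = { -g[0], -g[1] };
  double prev_dotg = dot2(g, g), threshold = 1e-9 * 1e-9 * prev_dotg;

  for (int k = 0; k < 2; k++) { // a 2x2 problem converges in at most 2 CG steps
    Vec2 Ad = { A[0][0] * d[0] + A[0][1] * d[1], A[1][0] * d[0] + A[1][1] * d[1] };
    double alpha = -dot2(d, g) / dot2(Ad, Ad);              // optimal step along d
    x[0] += alpha * d[0]; x[1] += alpha * d[1];             // x <- x + alpha*d
    Vec2 AtAd = { A[0][0] * Ad[0] + A[1][0] * Ad[1], A[0][1] * Ad[0] + A[1][1] * Ad[1] };
    g[0] += alpha * AtAd[0]; g[1] += alpha * AtAd[1];       // g <- g + alpha*A'*(A*d)
    double dotg = dot2(g, g);
    std::cout << "iteration " << k << ": alpha = " << alpha << ", dotg = " << dotg
        << ", x = (" << x[0] << ", " << x[1] << ")" << std::endl;
    if (dotg < threshold) break;
    double beta = dotg / prev_dotg;                         // Fletcher-Reeves
    prev_dotg = dotg;
    d[0] = -g[0] + beta * d[0]; d[1] = -g[1] + beta * d[1]; // d <- -g + beta*d
  }
  return 0;
}

With these numbers the first step uses alpha = 0.2 and the second alpha = 1.25, after which x = (1, 3) and the gradient vanishes.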
@@ -6,66 +6,12 @@
 */

 #include "GaussianFactorGraph.h"
-#include "iterative.h"
+#include "iterative-inl.h"

 using namespace std;

 namespace gtsam {

-  /* ************************************************************************* */
-  // Method of conjugate gradients (CG) template
-  // "System" class S needs gradient(S,v), e=S*v, v=S^e
-  // "Vector" class V needs dot(v,v), -v, v+v, s*v
-  // "Vector" class E needs dot(v,v)
-  // if (steepest) does steepest descent
-  template<class S, class V, class E>
-  V conjugateGradients(const S& Ab, V x, bool verbose, double epsilon,
-      size_t maxIterations, bool steepest = false) {
-
-    if (maxIterations == 0) maxIterations = dim(x) * (steepest ? 10 : 1);
-
-    // Start with g0 = A'*(A*x0-b), d0 = - g0
-    // i.e., first step is in direction of negative gradient
-    V g = gradient(Ab, x);
-    V d = -g;
-    double dotg0 = dot(g, g), prev_dotg = dotg0;
-    double threshold = epsilon * epsilon * dotg0;
-
-    if (verbose) cout << "CG: epsilon = " << epsilon << ", maxIterations = "
-        << maxIterations << ", ||g0||^2 = " << dotg0 << ", threshold = "
-        << threshold << endl;
-
-    // loop maxIterations times
-    for (size_t k = 0; k < maxIterations; k++) {
-
-      // calculate optimal step-size
-      E Ad = Ab * d;
-      double alpha = -dot(d, g) / dot(Ad, Ad);
-
-      // do step in new search direction
-      x = x + alpha * d;
-
-      // update gradient
-      g = g + alpha * (Ab ^ Ad);
-
-      // check for convergence
-      double dotg = dot(g, g);
-      if (verbose) cout << "iteration " << k << ": alpha = " << alpha
-          << ", dotg = " << dotg << endl;
-      if (dotg < threshold) break;
-
-      // calculate new search direction
-      if (steepest)
-        d = -g;
-      else {
-        double beta = dotg / prev_dotg;
-        prev_dotg = dotg;
-        d = -g + beta * d;
-      }
-    }
-    return x;
-  }
-
   /* ************************************************************************* */

   /** gradient of objective function 0.5*|Ax-b|^2 at x = A'*(Ax-b) */
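After this change, iterative.cpp only includes iterative-inl.h and keeps the concrete, non-template entry points (such as the gradient overload for the System typedef that follows the removed block). Purely for illustration, a hypothetical wrapper like the one below shows how such a .cpp function could instantiate the template; solveCG is an invented name and is not part of this commit.

// Hypothetical sketch of a non-template wrapper living in iterative.cpp;
// solveCG is an invented name, and the snippet assumes the gtsam headers
// from this commit (and their dependencies) are on the include path.
#include "iterative-inl.h"

namespace gtsam {

  Vector solveCG(const System& Ab, Vector x, bool verbose, double epsilon,
      size_t maxIterations) {
    // S = System (std::pair<Matrix, Vector>), V = Vector, E = Vector
    return conjugateGradients<System, Vector, Vector> (Ab, x, verbose, epsilon,
        maxIterations, false);
  }

} // namespace gtsam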
@@ -15,10 +15,19 @@ namespace gtsam {
   typedef std::pair<Matrix, Vector> System;

   /**
-   * In all calls below
-   * x is the initial estimate
-   * epsilon determines the convergence criterion: norm(g)<epsilon*norm(g0)
-   */
+   * Method of conjugate gradients (CG) template
+   * "System" class S needs gradient(S,v), e=S*v, v=S^e
+   * "Vector" class V needs dot(v,v), -v, v+v, s*v
+   * "Vector" class E needs dot(v,v)
+   * @param Ab, the "system" that needs to be solved, examples below
+   * @param x is the initial estimate
+   * @param epsilon determines the convergence criterion: norm(g)<epsilon*norm(g0)
+   * @param maxIterations, if 0 will be set to |x|
+   * @param steepest flag, if true does steepest descent, not CG
+   * */
+  template<class S, class V, class E>
+  V conjugateGradients(const S& Ab, V x, bool verbose, double epsilon,
+      size_t maxIterations, bool steepest = false);

   /**
    * Method of steepest gradients, System version
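The new comment block documents the template purely in terms of the operations that S, V and E must provide. A minimal, made-up "system" satisfying those requirements might look like the sketch below; DiagSystem, Vec and every helper in it are invented for illustration, and compiling it assumes iterative-inl.h from this commit (with its GTSAM dependencies) is on the include path.

// Hypothetical illustration of the S/V/E requirements documented above;
// nothing here is GTSAM code.
#include <vector>
#include <cstddef>
#include "iterative-inl.h"

// "Vector" type, used for both V and E: provides dot(v,v), -v, v+v, s*v, dim(v)
struct Vec { std::vector<double> vals; };
double dot(const Vec& a, const Vec& b) {
  double s = 0.0;
  for (size_t i = 0; i < a.vals.size(); i++) s += a.vals[i] * b.vals[i];
  return s;
}
size_t dim(const Vec& v) { return v.vals.size(); }
Vec operator-(const Vec& v) {
  Vec r = v; for (size_t i = 0; i < r.vals.size(); i++) r.vals[i] = -r.vals[i]; return r;
}
Vec operator+(const Vec& a, const Vec& b) {
  Vec r = a; for (size_t i = 0; i < r.vals.size(); i++) r.vals[i] += b.vals[i]; return r;
}
Vec operator*(double s, const Vec& v) {
  Vec r = v; for (size_t i = 0; i < r.vals.size(); i++) r.vals[i] *= s; return r;
}

// "System" type: least-squares problem min ||diag(a)*x - b||^2
struct DiagSystem { Vec a, b; };
Vec operator*(const DiagSystem& S, const Vec& x) {   // e = A*x
  Vec r = x; for (size_t i = 0; i < r.vals.size(); i++) r.vals[i] *= S.a.vals[i]; return r;
}
Vec operator^(const DiagSystem& S, const Vec& e) {   // v = A'*e (diagonal, so A' = A)
  return S * e;
}
Vec gradient(const DiagSystem& S, const Vec& x) {    // A'*(A*x - b)
  return S ^ (S * x + (-1.0 * S.b));
}

int main() {
  DiagSystem Ab;
  Ab.a.vals.push_back(2.0); Ab.a.vals.push_back(4.0);
  Ab.b.vals.push_back(2.0); Ab.b.vals.push_back(8.0); // exact solution x = (1, 2)
  Vec x0; x0.vals.assign(2, 0.0);
  Vec x = gtsam::conjugateGradients<DiagSystem, Vec, Vec> (Ab, x0, true, 1e-5, 0, false);
  return 0;
}

The System typedef relies on the same pattern: any type for which these free functions exist can be handed to conjugateGradients without touching the template.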