CGD method now uses CGD template in iterative.cpp
parent 863ee58c0f
commit bc27afc49f
@@ -16,6 +16,7 @@
 #include "GaussianFactorSet.h"
 #include "FactorGraph-inl.h"
 #include "inference-inl.h"
+#include "iterative.h"
 
 using namespace std;
 using namespace gtsam;
@@ -294,45 +295,22 @@ VectorConfig GaussianFactorGraph::gradientDescent(const VectorConfig& x0) const
 }
 
 /* ************************************************************************* */
-// directions are actually negative of those in cgd.lyx
-VectorConfig GaussianFactorGraph::conjugateGradientDescent(
-    const VectorConfig& x0) const {
-
-  // take first step in direction of the gradient
-  VectorConfig d = gradient(x0);
-  VectorConfig x = optimalUpdate(x0,d);
-  double prev_dotg = dot(d,d);
-
-  // loop over remaining (n-1) dimensions
-  int n = d.dim();
-  for (int k=2;k<=n;k++) {
-
-    // calculate gradient and check for convergence
-    VectorConfig gk = gradient(x);
-    double dotg = dot(gk,gk);
-    if (dotg<1e-9) break;
-
-    // calculate new search direction
-    double beta = dotg/prev_dotg;
-    prev_dotg = dotg;
-    d = gk + d * beta;
-
-    // do step in new search direction
-    x = optimalUpdate(x,d);
-  }
-  return x;
-}
-
-/* ************************************************************************* */
-boost::shared_ptr<VectorConfig>
-GaussianFactorGraph::gradientDescent_(const VectorConfig& x0) const {
-  return boost::shared_ptr<VectorConfig>(new VectorConfig(gradientDescent(x0)));
-}
-
-/* ************************************************************************* */
-boost::shared_ptr<VectorConfig>
-GaussianFactorGraph::conjugateGradientDescent_(const VectorConfig& x0) const {
-  return boost::shared_ptr<VectorConfig>(new VectorConfig(conjugateGradientDescent(x0)));
-}
+boost::shared_ptr<VectorConfig> GaussianFactorGraph::gradientDescent_(
+    const VectorConfig& x0) const {
+  return boost::shared_ptr<VectorConfig>(new VectorConfig(gradientDescent(x0)));
+}
+
+/* ************************************************************************* */
+VectorConfig GaussianFactorGraph::conjugateGradientDescent(
+    const VectorConfig& x0, double threshold) const {
+  return gtsam::conjugateGradientDescent(*this, x0, threshold);
+}
+
+/* ************************************************************************* */
+boost::shared_ptr<VectorConfig> GaussianFactorGraph::conjugateGradientDescent_(
+    const VectorConfig& x0, double threshold) const {
+  return boost::shared_ptr<VectorConfig>(new VectorConfig(
+      gtsam::conjugateGradientDescent(*this, x0, threshold)));
+}
 
 /* ************************************************************************* */
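Note: the loop deleted above is Fletcher-Reeves conjugate gradient with an exact line search (optimalUpdate), using the flipped sign convention its first comment warns about. In notation of my own (not taken from cgd.lyx), with g_k the gradient of the least-squares objective at x_k, the deleted iteration is

\[
\begin{aligned}
  d_1 &= g_1, & x_{k+1} &= \operatorname*{arg\,min}_{\alpha}\ \tfrac{1}{2}\,\lVert A(x_k+\alpha d_k)-b\rVert^2,\\
  \beta_k &= \frac{g_k^{\top}g_k}{g_{k-1}^{\top}g_{k-1}}, & d_k &= g_k + \beta_k\, d_{k-1},
\end{aligned}
\]

stopping once g_k^T g_k falls below the threshold (hard-coded 1e-9 in the deleted loop, a caller-supplied parameter in its replacement). Because d_k tracks +g_k rather than -g_k, the optimal line-search step is negative.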
@@ -185,25 +185,31 @@ namespace gtsam {
   VectorConfig optimalUpdate(const VectorConfig& x0, const VectorConfig& d) const;
 
-  /**
-   * Find solution using gradient descent
-   * @param x0: VectorConfig specifying initial estimate
-   * @return solution
-   */
-  VectorConfig gradientDescent(const VectorConfig& x0) const;
-  /**
-   * shared pointer versions for MATLAB
-   */
-  boost::shared_ptr<VectorConfig>gradientDescent_(const VectorConfig& x0) const;
-  /**
-   * Find solution using conjugate gradient descent
-   * @param x0: VectorConfig specifying initial estimate
-   * @return solution
-   */
-  VectorConfig conjugateGradientDescent(const VectorConfig& x0) const;
-  /**
-   * shared pointer versions for MATLAB
-   */
-  boost::shared_ptr<VectorConfig> conjugateGradientDescent_(const VectorConfig& x0) const;
+  /**
+   * Find solution using gradient descent
+   * @param x0: VectorConfig specifying initial estimate
+   * @return solution
+   */
+  VectorConfig gradientDescent(const VectorConfig& x0) const;
+
+  /**
+   * shared pointer versions for MATLAB
+   */
+  boost::shared_ptr<VectorConfig>
+  gradientDescent_(const VectorConfig& x0) const;
+
+  /**
+   * Find solution using conjugate gradient descent
+   * @param x0: VectorConfig specifying initial estimate
+   * @return solution
+   */
+  VectorConfig conjugateGradientDescent(const VectorConfig& x0,
+      double threshold = 1e-9) const;
+
+  /**
+   * shared pointer versions for MATLAB
+   */
+  boost::shared_ptr<VectorConfig> conjugateGradientDescent_(
+      const VectorConfig& x0, double threshold = 1e-9) const;
 };
 
 }
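A minimal caller sketch for the updated signatures. createGaussianFactorGraph and createZeroDelta are the fixtures used in the test below; the meaning of threshold (a bound on the squared gradient norm) is an assumption carried over from the deleted loop, not stated by this header.

#include "GaussianFactorGraph.h"

using namespace gtsam;

void cgdExample() {
  GaussianFactorGraph fg = createGaussianFactorGraph();
  VectorConfig x0 = createZeroDelta();

  // uses the new default threshold of 1e-9
  VectorConfig exact = fg.conjugateGradientDescent(x0);

  // a looser threshold trades accuracy for fewer iterations
  // (assumed semantics: stop once dot(g,g) < threshold, as in
  // the loop this commit deleted)
  VectorConfig rough = fg.conjugateGradientDescent(x0, 1e-6);
}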
@@ -21,37 +21,55 @@ TEST( Iterative, gradientDescent )
 {
   // Expected solution
   Ordering ord;
-  ord += "l1","x1","x2";
+  ord += "l1", "x1", "x2";
   GaussianFactorGraph fg = createGaussianFactorGraph();
   VectorConfig expected = fg.optimize(ord); // destructive
 
   // Do gradient descent
   GaussianFactorGraph fg2 = createGaussianFactorGraph();
   VectorConfig zero = createZeroDelta();
   VectorConfig actual = fg2.gradientDescent(zero);
   CHECK(assert_equal(expected,actual,1e-2));
-
-  // Do conjugate gradient descent
-  //VectorConfig actual2 = fg2.conjugateGradientDescent(zero);
-  VectorConfig actual2 = conjugateGradientDescent(fg2,zero);
-  CHECK(assert_equal(expected,actual2,1e-2));
-
-  // Do conjugate gradient descent, Matrix version
-  Matrix A;Vector b;
-  boost::tie(A,b) = fg2.matrix(ord);
-//  print(A,"A");
-//  print(b,"b");
-  Vector x0 = gtsam::zero(6);
-  Vector actualX = conjugateGradientDescent(A,b,x0);
-  Vector expectedX = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
-  CHECK(assert_equal(expectedX,actualX,1e-9));
-
-  // Do conjugate gradient descent, System version
-  System Ab = make_pair(A,b);
-  Vector actualX2 = conjugateGradientDescent(Ab,x0);
-  CHECK(assert_equal(expectedX,actualX2,1e-9));
 }
 
 /* ************************************************************************* */
-int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
+TEST( Iterative, conjugateGradientDescent )
+{
+  // Expected solution
+  Ordering ord;
+  ord += "l1", "x1", "x2";
+  GaussianFactorGraph fg = createGaussianFactorGraph();
+  VectorConfig expected = fg.optimize(ord); // destructive
+
+  // create graph and get matrices
+  GaussianFactorGraph fg2 = createGaussianFactorGraph();
+  Matrix A; Vector b;
+  Vector x0 = gtsam::zero(6);
+  boost::tie(A, b) = fg2.matrix(ord);
+  Vector expectedX = Vector_(6, -0.1, 0.1, -0.1, -0.1, 0.1, -0.2);
+
+  // Do conjugate gradient descent, System version
+  System Ab = make_pair(A, b);
+  Vector actualX = conjugateGradientDescent(Ab, x0);
+  CHECK(assert_equal(expectedX,actualX,1e-9));
+
+  // Do conjugate gradient descent, Matrix version
+  Vector actualX2 = conjugateGradientDescent(A, b, x0);
+  CHECK(assert_equal(expectedX,actualX2,1e-9));
+
+  // Do conjugate gradient descent on factor graph
+  VectorConfig zero = createZeroDelta();
+  VectorConfig actual = conjugateGradientDescent(fg2, zero);
+  CHECK(assert_equal(expected,actual,1e-2));
+
+  // Test method
+  VectorConfig actual2 = fg2.conjugateGradientDescent(zero);
+  CHECK(assert_equal(expected,actual2,1e-2));
+}
+
+/* ************************************************************************* */
+int main() {
+  TestResult tr;
+  return TestRegistry::runAllTests(tr);
+}
+/* ************************************************************************* */
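The iterative.h template that the new one-line methods delegate to is not part of this diff. Below is a minimal sketch of such a template, obtained by genericizing the loop this commit deleted; the hooks gradient, optimalUpdate, dot, and dim are placeholder names mirroring the old code, not the actual iterative.h interface.

// Illustrative only -- NOT the real gtsam iterative.h. One template can
// serve all three call sites in the test (System, Matrix, factor graph)
// if each system type S provides the hooks below for its vector type V.
template<class S, class V>
V conjugateGradientDescent(const S& Ab, V x, double threshold = 1e-9) {
  // first step along the (sign-flipped, per the cgd.lyx note) gradient
  V d = gradient(Ab, x);
  x = optimalUpdate(Ab, x, d);      // exact line search along d
  double prev_dotg = dot(d, d);

  // at most n-1 further steps for an n-dimensional problem
  for (size_t k = 2; k <= dim(d); k++) {
    V g = gradient(Ab, x);
    double dotg = dot(g, g);
    if (dotg < threshold) break;    // caller-tunable stopping rule

    double beta = dotg / prev_dotg; // Fletcher-Reeves coefficient
    prev_dotg = dotg;
    d = g + d * beta;
    x = optimalUpdate(Ab, x, d);
  }
  return x;
}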