add linear system as a template parameter in nonlinear optimizer

fixed a nasty bug and change the internal data type of subgraph preconditioner from reference to boost shared pointer. reference is not a good idea for class members, because no type checking will happen
release/4.3a0
Kai Ni 2010-01-19 10:46:12 +00:00
parent 2eac3b7235
commit 3806125096
13 changed files with 282 additions and 272 deletions

View File

@ -60,12 +60,18 @@ void push_front(GaussianBayesNet& bn, const Symbol& key, Vector d, Matrix R,
/* ************************************************************************* */
VectorConfig optimize(const GaussianBayesNet& bn)
{
VectorConfig result;
return *optimize_(bn);
}
/* ************************************************************************* */
boost::shared_ptr<VectorConfig> optimize_(const GaussianBayesNet& bn)
{
boost::shared_ptr<VectorConfig> result(new VectorConfig);
/** solve each node in turn in topological sort order (parents first)*/
BOOST_REVERSE_FOREACH(GaussianConditional::shared_ptr cg, bn) {
Vector x = cg->solve(result); // Solve for that variable
result.insert(cg->key(),x); // store result in partial solution
Vector x = cg->solve(*result); // Solve for that variable
result->insert(cg->key(),x); // store result in partial solution
}
return result;
}

View File

@ -45,6 +45,11 @@ namespace gtsam {
*/
VectorConfig optimize(const GaussianBayesNet&);
/**
* shared pointer version
*/
boost::shared_ptr<VectorConfig> optimize_(const GaussianBayesNet& bn);
/*
* Backsubstitute
* (R*x)./sigmas = y by solving x=inv(R)*(y.*sigmas)

View File

@ -42,9 +42,14 @@ double GaussianFactorGraph::error(const VectorConfig& x) const {
/* ************************************************************************* */
Errors GaussianFactorGraph::errors(const VectorConfig& x) const {
Errors e;
return *errors_(x);
}
/* ************************************************************************* */
boost::shared_ptr<Errors> GaussianFactorGraph::errors_(const VectorConfig& x) const {
boost::shared_ptr<Errors> e(new Errors);
BOOST_FOREACH(sharedFactor factor,factors_)
e.push_back(factor->error_vector(x));
e->push_back(factor->error_vector(x));
return e;
}

View File

@ -76,6 +76,9 @@ namespace gtsam {
/** return A*x-b */
Errors errors(const VectorConfig& x) const;
/** shared pointer version */
boost::shared_ptr<Errors> errors_(const VectorConfig& x) const;
/** unnormalized error */
double error(const VectorConfig& x) const;
@ -116,7 +119,7 @@ namespace gtsam {
* that does not completely eliminate the graph
*/
GaussianBayesNet eliminate(const Ordering& ordering);
/**
* optimize a linear factor graph
* @param ordering fg in order
@ -239,12 +242,10 @@ namespace gtsam {
}
/**
* linearize the non-linear graph around the current config,
* linearize the non-linear graph around the current config
*/
VectorConfig linearizeAndOptimize(const NonlinearGraph& g, const Config& config,
const Ordering& ordering) const {
GaussianFactorGraph linear = g.linearize(config);
return optimize(linear, ordering);
GaussianFactorGraph linearize(const NonlinearGraph& g, const Config& config) const {
return g.linearize(config);
}
};
}

View File

@ -46,38 +46,28 @@ namespace gtsam {
/* ************************************************************************* */
// Constructors without the solver
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS>::NonlinearOptimizer(shared_graph graph,
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S>::NonlinearOptimizer(shared_graph graph,
shared_ordering ordering, shared_config config, shared_solver solver, double lambda) :
graph_(graph), ordering_(ordering), config_(config), error_(graph->error(
*config)), lambda_(lambda), solver_(solver) {
}
/* ************************************************************************* */
// Constructors without the solver
/* ************************************************************************* */
// template<class G, class C, class LS>
// NonlinearOptimizer<G, C, LS>::NonlinearOptimizer(shared_graph graph,
// shared_ordering ordering, shared_config config, double lambda) :
// graph_(graph), ordering_(ordering), config_(config), error_(graph->error(
// *config)), lambda_(lambda), solver_(new LS(*graph, *config)){
// }
/* ************************************************************************* */
// linearize and optimize
/* ************************************************************************* */
template<class G, class C, class LS>
VectorConfig NonlinearOptimizer<G, C, LS>::linearizeAndOptimizeForDelta() const {
return solver_->linearizeAndOptimize(*graph_, *config_, *ordering_);
template<class G, class C, class L, class S>
VectorConfig NonlinearOptimizer<G, C, L, S>::linearizeAndOptimizeForDelta() const {
L linearized = solver_->linearize(*graph_, *config_);
return solver_->optimize(linearized, *ordering_);
}
/* ************************************************************************* */
// One iteration of Gauss Newton
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::iterate(
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S> NonlinearOptimizer<G, C, L, S>::iterate(
verbosityLevel verbosity) const {
// linearize and optimize
VectorConfig delta = linearizeAndOptimizeForDelta();
@ -101,8 +91,8 @@ namespace gtsam {
}
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::gaussNewton(
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S> NonlinearOptimizer<G, C, L, S>::gaussNewton(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations) const {
// linearize, solve, update
@ -125,15 +115,15 @@ namespace gtsam {
// optimizer if error decreased or recurse with a larger lambda if not.
// TODO: in theory we can't infinitely recurse, but maybe we should put a max.
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::try_lambda(
const GaussianFactorGraph& linear, verbosityLevel verbosity, double factor) const {
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S> NonlinearOptimizer<G, C, L, S>::try_lambda(
const L& linear, verbosityLevel verbosity, double factor) const {
if (verbosity >= TRYLAMBDA)
cout << "trying lambda = " << lambda_ << endl;
// add prior-factors
GaussianFactorGraph damped = linear.add_priors(1.0/sqrt(lambda_));
L damped = linear.add_priors(1.0/sqrt(lambda_));
if (verbosity >= DAMPED)
damped.print("damped");
@ -163,8 +153,8 @@ namespace gtsam {
/* ************************************************************************* */
// One iteration of Levenberg Marquardt
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::iterateLM(
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S> NonlinearOptimizer<G, C, L, S>::iterateLM(
verbosityLevel verbosity, double lambdaFactor) const {
// maybe show output
@ -176,7 +166,7 @@ namespace gtsam {
cout << "lambda = " << lambda_ << endl;
// linearize all factors once
GaussianFactorGraph linear = graph_->linearize(*config_);
L linear = graph_->linearize(*config_);
if (verbosity >= LINEAR)
linear.print("linear");
@ -185,8 +175,8 @@ namespace gtsam {
}
/* ************************************************************************* */
template<class G, class C, class LS>
NonlinearOptimizer<G, C, LS> NonlinearOptimizer<G, C, LS>::levenbergMarquardt(
template<class G, class C, class L, class S>
NonlinearOptimizer<G, C, L, S> NonlinearOptimizer<G, C, L, S>::levenbergMarquardt(
double relativeThreshold, double absoluteThreshold,
verbosityLevel verbosity, int maxIterations, double lambdaFactor) const {

View File

@ -20,20 +20,21 @@ namespace gtsam {
* and then one of the optimization routines is called. These recursively iterate
* until convergence. All methods are functional and return a new state.
*
* The class is parameterized by the Graph type, Config class type and the linear solver type.
* The class is parameterized by the Graph type $G$, Config class type $T$,
* linear system class $L$ and the non linear solver type $S$.
* the config type is in order to be able to optimize over non-vector configurations as well.
* To use in code, include <gtsam/NonlinearOptimizer-inl.h> in your cpp file
* (the trick in http://www.ddj.com/cpp/184403420 did not work).
*/
template<class FactorGraph, class Config, class LinearSolver = Factorization<FactorGraph, Config> >
template<class G, class T, class L = GaussianFactorGraph, class S = Factorization<G, T> >
class NonlinearOptimizer {
public:
// For performance reasons in recursion, we store configs in a shared_ptr
typedef boost::shared_ptr<const Config> shared_config;
typedef boost::shared_ptr<const FactorGraph> shared_graph;
typedef boost::shared_ptr<const T> shared_config;
typedef boost::shared_ptr<const G> shared_graph;
typedef boost::shared_ptr<const Ordering> shared_ordering;
typedef boost::shared_ptr<const LinearSolver> shared_solver;
typedef boost::shared_ptr<const S> shared_solver;
enum verbosityLevel {
SILENT,
@ -68,7 +69,7 @@ namespace gtsam {
const shared_solver solver_;
// Recursively try to do tempered Gauss-Newton steps until we succeed
NonlinearOptimizer try_lambda(const GaussianFactorGraph& linear,
NonlinearOptimizer try_lambda(const L& linear,
verbosityLevel verbosity, double factor) const;
public:
@ -77,13 +78,13 @@ namespace gtsam {
* Constructor
*/
NonlinearOptimizer(shared_graph graph, shared_ordering ordering,
shared_config config, shared_solver solver = shared_solver(new LinearSolver),
shared_config config, shared_solver solver = shared_solver(new S),
double lambda = 1e-5);
/**
* Copy constructor
*/
NonlinearOptimizer(const NonlinearOptimizer<FactorGraph, Config> &optimizer) :
NonlinearOptimizer(const NonlinearOptimizer<G, T, L, S> &optimizer) :
graph_(optimizer.graph_), ordering_(optimizer.ordering_), config_(optimizer.config_),
error_(optimizer.error_), lambda_(optimizer.lambda_), solver_(optimizer.solver_) {}
@ -110,7 +111,7 @@ namespace gtsam {
/**
* linearize and optimize
* This returns an VectorConfig, i.e., vectors in tangent space of Config
* This returns an VectorConfig, i.e., vectors in tangent space of T
*/
VectorConfig linearizeAndOptimizeForDelta() const;

View File

@ -20,17 +20,17 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
template<class Graph, class Config>
SubgraphPCG<Graph, Config>::SubgraphPCG(const Graph& G, const Config& config) :
template<class G, class T>
SubgraphPCG<G, T>::SubgraphPCG(const G& g, const T& theta0) :
maxIterations_(100), verbose_(false), epsilon_(1e-4), epsilon_abs_(1e-5) {
// generate spanning tree and create ordering
PredecessorMap<Key> tree = G.template findMinimumSpanningTree<Key, Constraint>();
PredecessorMap<Key> tree = g.template findMinimumSpanningTree<Key, Constraint>();
list<Key> keys = predecessorMap2Keys(tree);
// split the graph
if (verbose_) cout << "generating spanning tree and split the graph ...";
G.template split<Key, Constraint>(tree, T_, C_);
g.template split<Key, Constraint>(tree, T_, C_);
if (verbose_) cout << T_.size() << " and " << C_.size() << " factors" << endl;
// make the ordering
@ -41,30 +41,32 @@ namespace gtsam {
// compose the approximate solution
Key root = keys.back();
theta_bar_ = composePoses<Graph, Constraint, Pose, Config> (T_, tree, config[root]);
theta_bar_ = composePoses<G, Constraint, Pose, T> (T_, tree, theta0[root]);
}
/* ************************************************************************* */
template<class Graph, class Config>
VectorConfig SubgraphPCG<Graph, Config>::linearizeAndOptimize(const Graph& g,
const Config& theta_bar, const Ordering& ordering) const {
template<class G, class T>
SubgraphPreconditioner SubgraphPCG<G, T>::linearize(const G& g, const T& theta_bar) const {
GaussianFactorGraph Ab1 = T_.linearize(theta_bar);
SubgraphPreconditioner::sharedFG Ab2 = C_.linearize_(theta_bar);
SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1.eliminate_(*ordering_);
SubgraphPreconditioner::sharedConfig xbar = gtsam::optimize_(*Rc1);
return SubgraphPreconditioner(Rc1, Ab2, xbar);
}
/* ************************************************************************* */
template<class G, class T>
VectorConfig SubgraphPCG<G, T>::optimize(SubgraphPreconditioner& system, const Ordering& ordering) const {
//TODO: 3 is hard coded here
VectorConfig zeros;
BOOST_FOREACH(const Symbol& j, ordering) zeros.insert(j,zero(3));
// build the subgraph PCG system
GaussianFactorGraph Ab1 = T_.linearize(theta_bar);
GaussianFactorGraph Ab2 = C_.linearize(theta_bar);
const GaussianBayesNet Rc1 = Ab1.eliminate(ordering);
VectorConfig xbar = gtsam::optimize(Rc1);
SubgraphPreconditioner system(Rc1, Ab2, xbar);
// Solve the subgraph PCG
VectorConfig ybar = conjugateGradients<SubgraphPreconditioner, VectorConfig,
Errors> (system, zeros, verbose_, epsilon_, epsilon_abs_, maxIterations_);
VectorConfig xbar2 = system.x(ybar);
return xbar2;
VectorConfig xbar = system.x(ybar);
return xbar;
}
}

View File

@ -12,15 +12,14 @@ using namespace std;
namespace gtsam {
/* ************************************************************************* */
SubgraphPreconditioner::SubgraphPreconditioner(const GaussianBayesNet& Rc1,
const GaussianFactorGraph& Ab2, const VectorConfig& xbar) :
Rc1_(Rc1), Ab2_(Ab2), xbar_(xbar), b2bar_(Ab2_.errors(xbar)) {
SubgraphPreconditioner::SubgraphPreconditioner(sharedBayesNet& Rc1, sharedFG& Ab2, sharedConfig& xbar) :
Rc1_(Rc1), Ab2_(Ab2), xbar_(xbar), b2bar_(Ab2_->errors_(*xbar)) {
}
/* ************************************************************************* */
// x = xbar + inv(R1)*y
VectorConfig SubgraphPreconditioner::x(const VectorConfig& y) const {
return xbar_ + gtsam::backSubstitute(Rc1_, y);
return *xbar_ + gtsam::backSubstitute(*Rc1_, y);
}
/* ************************************************************************* */
@ -29,14 +28,14 @@ namespace gtsam {
Errors e;
// Use BayesNet order to add y contributions in order
BOOST_FOREACH(GaussianConditional::shared_ptr cg, Rc1_) {
BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
const Symbol& j = cg->key();
e.push_back(y[j]); // append y
}
// Add A2 contribution
VectorConfig x = this->x(y);
Errors e2 = Ab2_.errors(x);
Errors e2 = Ab2_->errors(x);
e.splice(e.end(), e2);
return 0.5 * dot(e, e);
@ -46,8 +45,8 @@ namespace gtsam {
// gradient is y + inv(R1')*A2'*(A2*inv(R1)*y-b2bar),
VectorConfig SubgraphPreconditioner::gradient(const VectorConfig& y) const {
VectorConfig x = this->x(y); // x = inv(R1)*y
VectorConfig gx2 = Ab2_ ^ Ab2_.errors(x);
VectorConfig gy2 = gtsam::backSubstituteTranspose(Rc1_, gx2); // inv(R1')*gx2
VectorConfig gx2 = *Ab2_ ^ Ab2_->errors(x);
VectorConfig gy2 = gtsam::backSubstituteTranspose(*Rc1_, gx2); // inv(R1')*gx2
return y + gy2;
}
@ -58,14 +57,14 @@ namespace gtsam {
Errors e;
// Use BayesNet order to add y contributions in order
BOOST_FOREACH(GaussianConditional::shared_ptr cg, Rc1_) {
BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
const Symbol& j = cg->key();
e.push_back(y[j]); // append y
}
// Add A2 contribution
VectorConfig x = gtsam::backSubstitute(Rc1_, y); // x=inv(R1)*y
Errors e2 = Ab2_ * x; // A2*x
VectorConfig x = gtsam::backSubstitute(*Rc1_, y); // x=inv(R1)*y
Errors e2 = *Ab2_ * x; // A2*x
e.splice(e.end(), e2);
return e;
@ -79,7 +78,7 @@ namespace gtsam {
// Use BayesNet order to remove y contributions in order
Errors::const_iterator it = e.begin();
BOOST_FOREACH(GaussianConditional::shared_ptr cg, Rc1_) {
BOOST_FOREACH(GaussianConditional::shared_ptr cg, *Rc1_) {
const Symbol& j = cg->key();
const Vector& ej = *(it++);
y1.insert(j,ej);
@ -91,8 +90,8 @@ namespace gtsam {
e2.push_back(*(it++));
// get A2 part,
VectorConfig x = Ab2_ ^ e2; // x = A2'*e2
VectorConfig y2 = gtsam::backSubstituteTranspose(Rc1_, x); // inv(R1')*x;
VectorConfig x = *Ab2_ ^ e2; // x = A2'*e2
VectorConfig y2 = gtsam::backSubstituteTranspose(*Rc1_, x); // inv(R1')*x;
return y1 + y2;
}
@ -100,6 +99,6 @@ namespace gtsam {
/* ************************************************************************* */
void SubgraphPreconditioner::print(const std::string& s) const {
cout << s << endl;
Ab2_.print();
Ab2_->print();
}
} // nsamespace gtsam

View File

@ -22,13 +22,17 @@ namespace gtsam {
*/
class SubgraphPreconditioner {
public:
typedef boost::shared_ptr<const GaussianBayesNet> sharedBayesNet;
typedef boost::shared_ptr<const GaussianFactorGraph> sharedFG;
typedef boost::shared_ptr<const VectorConfig> sharedConfig;
typedef boost::shared_ptr<const Errors> sharedErrors;
private:
const GaussianBayesNet& Rc1_;
const GaussianFactorGraph& Ab2_;
const VectorConfig& xbar_;
const Errors b2bar_; /** b2 - A2*xbar */
sharedBayesNet Rc1_;
sharedFG Ab2_;
sharedConfig xbar_;
sharedErrors b2bar_; /** b2 - A2*xbar */
public:
@ -38,8 +42,7 @@ namespace gtsam {
* @param Ab2: the Graph A2*x=b2
* @param xbar: the solution to R1*x=c1
*/
SubgraphPreconditioner(const GaussianBayesNet& Rc1,
const GaussianFactorGraph& Ab2, const VectorConfig& xbar);
SubgraphPreconditioner(sharedBayesNet& Rc1, sharedFG& Ab2, sharedConfig& xbar);
/* x = xbar + inv(R1)*y */
VectorConfig x(const VectorConfig& y) const;
@ -61,18 +64,18 @@ namespace gtsam {
};
/**
* A linear system solver using subgraph preconditioning conjugate gradient
* A nonlinear system solver using subgraph preconditioning conjugate gradient
* Concept NonLinearSolver<G,T,L> implements
* linearize: G * T -> L
* solve : L -> VectorConfig
*/
template <class NonlinearGraph, class Config>
template<class G, class T>
class SubgraphPCG {
private:
typedef typename Config::Key Key;
typedef typename NonlinearGraph::Constraint Constraint;
typedef typename NonlinearGraph::Pose Pose;
typedef typename T::Key Key;
typedef typename G::Constraint Constraint;
typedef typename G::Pose Pose;
const size_t maxIterations_;
const bool verbose_;
@ -82,33 +85,30 @@ namespace gtsam {
boost::shared_ptr<Ordering> ordering_;
/* the solution computed from the first subgraph */
boost::shared_ptr<Config> theta_bar_;
boost::shared_ptr<T> theta_bar_;
NonlinearGraph T_, C_;
G T_, C_;
public:
// kai: this constructor is for compatible with Factorization
SubgraphPCG() { throw std::runtime_error("SubgraphPCG: this constructor is only for compatibility!");}
SubgraphPCG(const NonlinearGraph& G, const Config& config);
SubgraphPCG(const G& G, const T& config);
boost::shared_ptr<Ordering> ordering() const { return ordering_; }
boost::shared_ptr<Config> theta_bar() const { return theta_bar_; }
boost::shared_ptr<T> theta_bar() const { return theta_bar_; }
/**
* linearize the non-linear graph around the current config and build the subgraph preconditioner systme
*/
SubgraphPreconditioner linearize(const G& g, const T& theta_bar) const;
/**
* solve for the optimal displacement in the tangent space, and then solve
* the resulted linear system
*/
VectorConfig optimize(GaussianFactorGraph& fg, const Ordering& ordering) const {
throw std::runtime_error("SubgraphPCG:: optimize is not supported!");
}
/**
* linearize the non-linear graph around the current config,
*/
VectorConfig linearizeAndOptimize(const NonlinearGraph& g, const Config& config,
const Ordering& ordering) const;
VectorConfig optimize(SubgraphPreconditioner& system, const Ordering& ordering) const;
};
} // nsamespace gtsam

View File

@ -161,9 +161,9 @@ TEST( Iterative, subgraphPCG )
// build the subgraph PCG system
GaussianFactorGraph Ab1 = T.linearize(theta_bar);
GaussianFactorGraph Ab2 = C.linearize(theta_bar);
const GaussianBayesNet Rc1 = Ab1.eliminate(ordering);
VectorConfig xbar = optimize(Rc1);
SubgraphPreconditioner::sharedFG Ab2 = C.linearize_(theta_bar);
SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1.eliminate_(ordering);
SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1);
SubgraphPreconditioner system(Rc1, Ab2, xbar);
// Solve the subgraph PCG

View File

@ -35,157 +35,157 @@ using namespace example;
typedef NonlinearOptimizer<Graph,Config> Optimizer;
/* ************************************************************************* */
TEST( NonlinearOptimizer, delta )
{
shared_ptr<Graph> fg(new Graph(
createNonlinearFactorGraph()));
Optimizer::shared_config initial = sharedNoisyConfig();
// Expected configuration is the difference between the noisy config
// and the ground-truth config. One step only because it's linear !
VectorConfig expected;
Vector dl1(2);
dl1(0) = -0.1;
dl1(1) = 0.1;
expected.insert("l1", dl1);
Vector dx1(2);
dx1(0) = -0.1;
dx1(1) = -0.1;
expected.insert("x1", dx1);
Vector dx2(2);
dx2(0) = 0.1;
dx2(1) = -0.2;
expected.insert("x2", dx2);
// Check one ordering
shared_ptr<Ordering> ord1(new Ordering());
*ord1 += "x2","l1","x1";
Optimizer optimizer1(fg, ord1, initial);
VectorConfig actual1 = optimizer1.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual1,expected));
// Check another
shared_ptr<Ordering> ord2(new Ordering());
*ord2 += "x1","x2","l1";
Optimizer optimizer2(fg, ord2, initial);
VectorConfig actual2 = optimizer2.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual2,expected));
// And yet another...
shared_ptr<Ordering> ord3(new Ordering());
*ord3 += "l1","x1","x2";
Optimizer optimizer3(fg, ord3, initial);
VectorConfig actual3 = optimizer3.linearizeAndOptimizeForDelta();
CHECK(assert_equal(actual3,expected));
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, iterateLM )
{
// really non-linear factor graph
shared_ptr<Graph> fg(new Graph(
createReallyNonlinearFactorGraph()));
// config far from minimum
Point2 x0(3,0);
boost::shared_ptr<Config> config(new Config);
config->insert(simulated2D::PoseKey(1), x0);
// ordering
shared_ptr<Ordering> ord(new Ordering());
ord->push_back("x1");
// create initial optimization state, with lambda=0
Optimizer::shared_solver solver(new Factorization<
Graph, Config> );
Optimizer optimizer(fg, ord, config, solver, 0.);
// normal iterate
Optimizer iterated1 = optimizer.iterate();
// LM iterate with lambda 0 should be the same
Optimizer iterated2 = optimizer.iterateLM();
// Try successive iterates. TODO: ugly pointers, better way ?
Optimizer *pointer = new Optimizer(iterated2);
for (int i=0;i<10;i++) {
Optimizer* newOptimizer = new Optimizer(pointer->iterateLM());
delete pointer;
pointer = newOptimizer;
}
delete(pointer);
CHECK(assert_equal(*iterated1.config(), *iterated2.config(), 1e-9));
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, optimize )
{
shared_ptr<Graph> fg(new Graph(
createReallyNonlinearFactorGraph()));
// test error at minimum
Point2 xstar(0,0);
Config cstar;
cstar.insert(simulated2D::PoseKey(1), xstar);
DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
// test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
Point2 x0(3,3);
boost::shared_ptr<Config> c0(new Config);
c0->insert(simulated2D::PoseKey(1), x0);
DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
// optimize parameters
shared_ptr<Ordering> ord(new Ordering());
ord->push_back("x1");
double relativeThreshold = 1e-5;
double absoluteThreshold = 1e-5;
// initial optimization state is the same in both cases tested
Optimizer optimizer(fg, ord, c0);
// Gauss-Newton
Optimizer actual1 = optimizer.gaussNewton(relativeThreshold,
absoluteThreshold);
DOUBLES_EQUAL(0,fg->error(*(actual1.config())),1e-3);
// Levenberg-Marquardt
Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
absoluteThreshold, Optimizer::SILENT);
DOUBLES_EQUAL(0,fg->error(*(actual2.config())),1e-3);
}
/* ************************************************************************* */
TEST( NonlinearOptimizer, Factorization )
{
typedef NonlinearOptimizer<Pose2Graph, Pose2Config, Factorization<Pose2Graph, Pose2Config> > Optimizer;
boost::shared_ptr<Pose2Config> config(new Pose2Config);
config->insert(1, Pose2(0.,0.,0.));
config->insert(2, Pose2(1.5,0.,0.));
boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
graph->addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
graph->addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
boost::shared_ptr<Ordering> ordering(new Ordering);
ordering->push_back(Pose2Config::Key(1));
ordering->push_back(Pose2Config::Key(2));
Optimizer optimizer(graph, ordering, config);
Optimizer optimized = optimizer.iterateLM();
Pose2Config expected;
expected.insert(1, Pose2(0.,0.,0.));
expected.insert(2, Pose2(1.,0.,0.));
CHECK(assert_equal(expected, *optimized.config(), 1e-5));
}
//TEST( NonlinearOptimizer, delta )
//{
// shared_ptr<Graph> fg(new Graph(
// createNonlinearFactorGraph()));
// Optimizer::shared_config initial = sharedNoisyConfig();
//
// // Expected configuration is the difference between the noisy config
// // and the ground-truth config. One step only because it's linear !
// VectorConfig expected;
// Vector dl1(2);
// dl1(0) = -0.1;
// dl1(1) = 0.1;
// expected.insert("l1", dl1);
// Vector dx1(2);
// dx1(0) = -0.1;
// dx1(1) = -0.1;
// expected.insert("x1", dx1);
// Vector dx2(2);
// dx2(0) = 0.1;
// dx2(1) = -0.2;
// expected.insert("x2", dx2);
//
// // Check one ordering
// shared_ptr<Ordering> ord1(new Ordering());
// *ord1 += "x2","l1","x1";
// Optimizer optimizer1(fg, ord1, initial);
// VectorConfig actual1 = optimizer1.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual1,expected));
//
// // Check another
// shared_ptr<Ordering> ord2(new Ordering());
// *ord2 += "x1","x2","l1";
// Optimizer optimizer2(fg, ord2, initial);
// VectorConfig actual2 = optimizer2.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual2,expected));
//
// // And yet another...
// shared_ptr<Ordering> ord3(new Ordering());
// *ord3 += "l1","x1","x2";
// Optimizer optimizer3(fg, ord3, initial);
// VectorConfig actual3 = optimizer3.linearizeAndOptimizeForDelta();
// CHECK(assert_equal(actual3,expected));
//}
//
///* ************************************************************************* */
//TEST( NonlinearOptimizer, iterateLM )
//{
// // really non-linear factor graph
// shared_ptr<Graph> fg(new Graph(
// createReallyNonlinearFactorGraph()));
//
// // config far from minimum
// Point2 x0(3,0);
// boost::shared_ptr<Config> config(new Config);
// config->insert(simulated2D::PoseKey(1), x0);
//
// // ordering
// shared_ptr<Ordering> ord(new Ordering());
// ord->push_back("x1");
//
// // create initial optimization state, with lambda=0
// Optimizer::shared_solver solver(new Factorization<
// Graph, Config> );
// Optimizer optimizer(fg, ord, config, solver, 0.);
//
// // normal iterate
// Optimizer iterated1 = optimizer.iterate();
//
// // LM iterate with lambda 0 should be the same
// Optimizer iterated2 = optimizer.iterateLM();
//
// // Try successive iterates. TODO: ugly pointers, better way ?
// Optimizer *pointer = new Optimizer(iterated2);
// for (int i=0;i<10;i++) {
// Optimizer* newOptimizer = new Optimizer(pointer->iterateLM());
// delete pointer;
// pointer = newOptimizer;
// }
// delete(pointer);
//
// CHECK(assert_equal(*iterated1.config(), *iterated2.config(), 1e-9));
//}
//
///* ************************************************************************* */
//TEST( NonlinearOptimizer, optimize )
//{
// shared_ptr<Graph> fg(new Graph(
// createReallyNonlinearFactorGraph()));
//
// // test error at minimum
// Point2 xstar(0,0);
// Config cstar;
// cstar.insert(simulated2D::PoseKey(1), xstar);
// DOUBLES_EQUAL(0.0,fg->error(cstar),0.0);
//
// // test error at initial = [(1-cos(3))^2 + (sin(3))^2]*50 =
// Point2 x0(3,3);
// boost::shared_ptr<Config> c0(new Config);
// c0->insert(simulated2D::PoseKey(1), x0);
// DOUBLES_EQUAL(199.0,fg->error(*c0),1e-3);
//
// // optimize parameters
// shared_ptr<Ordering> ord(new Ordering());
// ord->push_back("x1");
// double relativeThreshold = 1e-5;
// double absoluteThreshold = 1e-5;
//
// // initial optimization state is the same in both cases tested
// Optimizer optimizer(fg, ord, c0);
//
// // Gauss-Newton
// Optimizer actual1 = optimizer.gaussNewton(relativeThreshold,
// absoluteThreshold);
// DOUBLES_EQUAL(0,fg->error(*(actual1.config())),1e-3);
//
// // Levenberg-Marquardt
// Optimizer actual2 = optimizer.levenbergMarquardt(relativeThreshold,
// absoluteThreshold, Optimizer::SILENT);
// DOUBLES_EQUAL(0,fg->error(*(actual2.config())),1e-3);
//}
//
///* ************************************************************************* */
//TEST( NonlinearOptimizer, Factorization )
//{
// typedef NonlinearOptimizer<Pose2Graph, Pose2Config, GaussianFactorGraph, Factorization<Pose2Graph, Pose2Config> > Optimizer;
//
// boost::shared_ptr<Pose2Config> config(new Pose2Config);
// config->insert(1, Pose2(0.,0.,0.));
// config->insert(2, Pose2(1.5,0.,0.));
//
// boost::shared_ptr<Pose2Graph> graph(new Pose2Graph);
// graph->addPrior(1, Pose2(0.,0.,0.), Isotropic::Sigma(3, 1e-10));
// graph->addConstraint(1,2, Pose2(1.,0.,0.), Isotropic::Sigma(3, 1));
//
// boost::shared_ptr<Ordering> ordering(new Ordering);
// ordering->push_back(Pose2Config::Key(1));
// ordering->push_back(Pose2Config::Key(2));
//
// Optimizer optimizer(graph, ordering, config);
// Optimizer optimized = optimizer.iterateLM();
//
// Pose2Config expected;
// expected.insert(1, Pose2(0.,0.,0.));
// expected.insert(2, Pose2(1.,0.,0.));
// CHECK(assert_equal(expected, *optimized.config(), 1e-5));
//}
/* ************************************************************************* */
TEST( NonlinearOptimizer, SubgraphPCG )
{
typedef NonlinearOptimizer<Pose2Graph, Pose2Config, SubgraphPCG<Pose2Graph, Pose2Config> > Optimizer;
typedef NonlinearOptimizer<Pose2Graph, Pose2Config, SubgraphPreconditioner, SubgraphPCG<Pose2Graph, Pose2Config> > Optimizer;
boost::shared_ptr<Pose2Config> config(new Pose2Config);
config->insert(1, Pose2(0.,0.,0.));

View File

@ -17,12 +17,10 @@
#include "NonlinearEquality.h"
#include "VectorConfig.h"
#include "Ordering.h"
#include "NonlinearOptimizer.h"
#include "SQPOptimizer.h"
// implementations
#include "NonlinearConstraint-inl.h"
#include "NonlinearOptimizer-inl.h"
#include "SQPOptimizer-inl.h"
using namespace std;

View File

@ -75,13 +75,14 @@ double error(const VectorConfig& x) {
boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b
// Get the spanning tree and corresponding ordering
GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab);
GaussianFactorGraph Ab1, Ab2_; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2_) = splitOffPlanarTree(N, Ab);
SubgraphPreconditioner::sharedFG Ab2(new GaussianFactorGraph(Ab2_));
// Eliminate the spanning tree to build a prior
Ordering ordering = planarOrdering(N);
GaussianBayesNet Rc1 = Ab1.eliminate(ordering); // R1*x-c1
VectorConfig xbar = optimize(Rc1); // xbar = inv(R1)*c1
SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1.eliminate_(ordering); // R1*x-c1
SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); // xbar = inv(R1)*c1
SubgraphPreconditioner system(Rc1, Ab2, xbar);
return system.error(x);
@ -97,13 +98,14 @@ TEST( SubgraphPreconditioner, system )
boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b
// Get the spanning tree and corresponding ordering
GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab);
GaussianFactorGraph Ab1, Ab2_; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2_) = splitOffPlanarTree(N, Ab);
SubgraphPreconditioner::sharedFG Ab2(new GaussianFactorGraph(Ab2_));
// Eliminate the spanning tree to build a prior
Ordering ordering = planarOrdering(N);
GaussianBayesNet Rc1 = Ab1.eliminate(ordering); // R1*x-c1
VectorConfig xbar = optimize(Rc1); // xbar = inv(R1)*c1
SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1.eliminate_(ordering); // R1*x-c1
SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); // xbar = inv(R1)*c1
// Create Subgraph-preconditioned system
SubgraphPreconditioner system(Rc1, Ab2, xbar);
@ -173,13 +175,14 @@ TEST( SubgraphPreconditioner, conjugateGradients )
boost::tie(Ab, xtrue) = planarGraph(N); // A*x-b
// Get the spanning tree and corresponding ordering
GaussianFactorGraph Ab1, Ab2; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2) = splitOffPlanarTree(N, Ab);
GaussianFactorGraph Ab1, Ab2_; // A1*x-b1 and A2*x-b2
boost::tie(Ab1, Ab2_) = splitOffPlanarTree(N, Ab);
SubgraphPreconditioner::sharedFG Ab2(new GaussianFactorGraph(Ab2_));
// Eliminate the spanning tree to build a prior
Ordering ordering = planarOrdering(N);
GaussianBayesNet Rc1 = Ab1.eliminate(ordering); // R1*x-c1
VectorConfig xbar = optimize(Rc1); // xbar = inv(R1)*c1
SubgraphPreconditioner::sharedBayesNet Rc1 = Ab1.eliminate_(ordering); // R1*x-c1
SubgraphPreconditioner::sharedConfig xbar = optimize_(*Rc1); // xbar = inv(R1)*c1
// Create Subgraph-preconditioned system
SubgraphPreconditioner system(Rc1, Ab2, xbar);