diff --git a/base/Matrix.cpp b/base/Matrix.cpp index 819d9bda7..aac715b05 100644 --- a/base/Matrix.cpp +++ b/base/Matrix.cpp @@ -674,6 +674,21 @@ void householder(Matrix &A) { } } #endif + +void householderColMajor(MatrixColMajor &A) { + __CLPK_integer m = A.size1(); + __CLPK_integer n = A.size2(); + + double tau[n]; + double work_optimal_size; + __CLPK_integer lwork = -1; + __CLPK_integer info; + + dgeqrf_(&m, &n, A.data().begin(), &m, tau, &work_optimal_size, &lwork, &info); + lwork = (__CLPK_integer)work_optimal_size; + double work[lwork]; + dgeqrf_(&m, &n, A.data().begin(), &m, tau, work, &lwork, &info); +} #endif ///* ************************************************************************* */ diff --git a/base/Matrix.h b/base/Matrix.h index 8ce7dc59d..4f81032c9 100644 --- a/base/Matrix.h +++ b/base/Matrix.h @@ -264,15 +264,21 @@ void householder_(Matrix& A, size_t k); */ void householder(Matrix& A, size_t k); -/** - * Householder tranformation, zeros below diagonal - * @param k number of columns to zero out below diagonal - * @return nothing: in place !!! - */ #ifdef GT_USE_LAPACK #ifdef YA_BLAS -void householder(Matrix &A); +/** + * Householder transformation, zeros below diagonal + * @return nothing: in place !!! + */ +void householder(Matrix& A); #endif + +/** + * Householder transformation directly on a column-major matrix. Does not zero + * below the diagonal, so it will contain Householder vectors. + * @return nothing: in place !!! 
+ */ +void householderColMajor(MatrixColMajor& A); #endif /** diff --git a/examples/PlanarSLAMSelfContained_advanced.cpp b/examples/PlanarSLAMSelfContained_advanced.cpp index 211871d98..70a26e261 100644 --- a/examples/PlanarSLAMSelfContained_advanced.cpp +++ b/examples/PlanarSLAMSelfContained_advanced.cpp @@ -38,6 +38,7 @@ #include #include #include +#include #include // Main typedefs diff --git a/examples/SimpleRotation.cpp b/examples/SimpleRotation.cpp index be541795e..3b53e8047 100644 --- a/examples/SimpleRotation.cpp +++ b/examples/SimpleRotation.cpp @@ -43,7 +43,6 @@ using namespace gtsam; typedef TypedSymbol Key; typedef LieValues Values; typedef NonlinearFactorGraph Graph; -typedef Factorization Solver; typedef NonlinearOptimizer Optimizer; const double degree = M_PI / 180; diff --git a/linear/Factorization.h b/linear/Factorization.h deleted file mode 100644 index 0f6848519..000000000 --- a/linear/Factorization.h +++ /dev/null @@ -1,74 +0,0 @@ -/* ---------------------------------------------------------------------------- - - * GTSAM Copyright 2010, Georgia Tech Research Corporation, - * Atlanta, Georgia 30332-0415 - * All Rights Reserved - * Authors: Frank Dellaert, et al. 
(see THANKS for the full author list) - - * See LICENSE for the license information - - * -------------------------------------------------------------------------- */ - -/** - * @file Factorization - * @brief Template Linear solver class that uses a Gaussian Factor Graph - * @author Kai Ni - * @author Frank Dellaert - */ - -// $Id: GaussianFactorGraph.h,v 1.24 2009/08/14 20:48:51 acunning Exp $ - -#pragma once - -#include -#include -#include -#include - -namespace gtsam { - - class Ordering; - - /** - * A linear system solver using factorization - */ - template - class Factorization { - private: - boost::shared_ptr ordering_; - - public: - Factorization(boost::shared_ptr ordering) - : ordering_(ordering) { - if (!ordering) throw std::invalid_argument("Factorization constructor: ordering = NULL"); - } - - /** - * solve for the optimal displacement in the tangent space, and then solve - * the resulted linear system - */ - VectorValues optimize(GaussianFactorGraph& fg) const { - return *GaussianSequentialSolver(fg).optimize(); - } - - /** - * linearize the non-linear graph around the current config - */ - boost::shared_ptr linearize(const NONLINEARGRAPH& g, const VALUES& config) const { - return g.linearize(config, *ordering_); - } - - /** - * Does not do anything here in Factorization. 
- */ - boost::shared_ptr prepareLinear(const GaussianFactorGraph& fg) const { - return boost::shared_ptr(new Factorization(*this)); - } - - /** expmap the Values given the stored Ordering */ - VALUES expmap(const VALUES& config, const VectorValues& delta) const { - return config.expmap(delta, *ordering_); - } - }; - -} diff --git a/linear/GaussianJunctionTree.cpp b/linear/GaussianJunctionTree.cpp index 1d409d83c..3b904bbdc 100644 --- a/linear/GaussianJunctionTree.cpp +++ b/linear/GaussianJunctionTree.cpp @@ -27,8 +27,9 @@ namespace gtsam { - // explicit template instantiation - template class JunctionTree; + // explicit template instantiation + template class JunctionTree; + template class ClusterTree; using namespace std; diff --git a/linear/Makefile.am b/linear/Makefile.am index d5009e3a9..666c735b8 100644 --- a/linear/Makefile.am +++ b/linear/Makefile.am @@ -25,7 +25,7 @@ check_PROGRAMS += tests/testVectorValues sources += GaussianSequentialSolver.cpp GaussianMultifrontalSolver.cpp # Gaussian Factor Graphs -headers += GaussianFactorSet.h Factorization.h +headers += GaussianFactorSet.h sources += GaussianFactor.cpp GaussianFactorGraph.cpp sources += GaussianJunctionTree.cpp sources += GaussianConditional.cpp GaussianBayesNet.cpp diff --git a/linear/NoiseModel.cpp b/linear/NoiseModel.cpp index f55bede1a..bbfbd36d1 100644 --- a/linear/NoiseModel.cpp +++ b/linear/NoiseModel.cpp @@ -219,10 +219,10 @@ SharedDiagonal Gaussian::QR(Matrix& Ab, boost::optional&> firs // General QR, see also special version in Constrained SharedDiagonal Gaussian::QRColumnWise(ublas::matrix& Ab, vector& firstZeroRows) const { - Matrix Abresult(Ab); - SharedDiagonal result = QR(Abresult, firstZeroRows); - Ab = Abresult; - return result; + WhitenInPlace(Ab); + householderColMajor(Ab); + size_t maxRank = min(Ab.size1(), Ab.size2()-1); + return Unit::Create(maxRank); // // get size(A) and maxRank // // TODO: really no rank problems ? 
// size_t m = Ab.size1(), n = Ab.size2()-1; @@ -402,14 +402,10 @@ SharedDiagonal Constrained::QR(Matrix& Ab, boost::optional&> f } SharedDiagonal Constrained::QRColumnWise(ublas::matrix& Ab, vector& firstZeroRows) const { -// Matrix AbRowWise(Ab); -// SharedDiagonal result = this->QR(AbRowWise, firstZeroRows); -// Ab = AbRowWise; -// return result; - Matrix Abresult(Ab); - SharedDiagonal result = QR(Abresult, firstZeroRows); - Ab = Abresult; - return result; + Matrix AbRowWise(Ab); + SharedDiagonal result = this->QR(AbRowWise, firstZeroRows); + Ab = AbRowWise; + return result; } /* ************************************************************************* */ diff --git a/nonlinear/NonlinearOptimizer.h b/nonlinear/NonlinearOptimizer.h index c41204762..bf2c40457 100644 --- a/nonlinear/NonlinearOptimizer.h +++ b/nonlinear/NonlinearOptimizer.h @@ -23,8 +23,8 @@ #include #include #include +#include #include -#include #include namespace gtsam { @@ -63,7 +63,7 @@ namespace gtsam { * * */ - template + template class NonlinearOptimizer { public: diff --git a/slam/tests/testPose2SLAM.cpp b/slam/tests/testPose2SLAM.cpp index 9239bab3e..cf47f01d3 100644 --- a/slam/tests/testPose2SLAM.cpp +++ b/slam/tests/testPose2SLAM.cpp @@ -113,8 +113,7 @@ TEST(Pose2Graph, optimize) { shared_ptr ordering(new Ordering); *ordering += "x0","x1"; typedef NonlinearOptimizer Optimizer; - Optimizer::shared_solver solver(new Optimizer::solver(ordering)); - Optimizer optimizer0(fg, initial, solver); + Optimizer optimizer0(fg, initial, ordering); Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT; //Optimizer::verbosityLevel verbosity = Optimizer::ERROR; Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); @@ -153,8 +152,7 @@ TEST(Pose2Graph, optimizeThreePoses) { *ordering += "x0","x1","x2"; // optimize - pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering)); - pose2SLAM::Optimizer optimizer0(fg, initial, solver); + 
pose2SLAM::Optimizer optimizer0(fg, initial, ordering); pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT; pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); @@ -197,8 +195,7 @@ TEST(Pose2Graph, optimizeCircle) { *ordering += "x0","x1","x2","x3","x4","x5"; // optimize - pose2SLAM::Optimizer::shared_solver solver(new pose2SLAM::Optimizer::solver(ordering)); - pose2SLAM::Optimizer optimizer0(fg, initial, solver); + pose2SLAM::Optimizer optimizer0(fg, initial, ordering); pose2SLAM::Optimizer::Parameters::verbosityLevel verbosity = pose2SLAM::Optimizer::Parameters::SILENT; pose2SLAM::Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); diff --git a/slam/tests/testPose3SLAM.cpp b/slam/tests/testPose3SLAM.cpp index 390fbed15..3f4b4b3b9 100644 --- a/slam/tests/testPose3SLAM.cpp +++ b/slam/tests/testPose3SLAM.cpp @@ -71,8 +71,7 @@ TEST(Pose3Graph, optimizeCircle) { shared_ptr ordering(new Ordering); *ordering += "x0","x1","x2","x3","x4","x5"; typedef NonlinearOptimizer Optimizer; - Optimizer::shared_solver solver(new Optimizer::solver(ordering)); - Optimizer optimizer0(fg, initial, solver); + Optimizer optimizer0(fg, initial, ordering); Optimizer::Parameters::verbosityLevel verbosity = Optimizer::Parameters::SILENT; // Optimizer::verbosityLevel verbosity = Optimizer::ERROR; Optimizer optimizer = optimizer0.levenbergMarquardt(1e-15, 1e-15, verbosity); diff --git a/slam/tests/testVSLAMGraph.cpp b/slam/tests/testVSLAMGraph.cpp index 14a5608cc..273d8a691 100644 --- a/slam/tests/testVSLAMGraph.cpp +++ b/slam/tests/testVSLAMGraph.cpp @@ -105,8 +105,7 @@ TEST( Graph, optimizeLM) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - Optimizer::shared_solver solver(new Optimizer::solver(ordering)); - Optimizer optimizer(graph, initialEstimate, solver); + Optimizer optimizer(graph, initialEstimate, ordering); 
DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -143,8 +142,7 @@ TEST( Graph, optimizeLM2) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - Optimizer::shared_solver solver(new Optimizer::solver(ordering)); - Optimizer optimizer(graph, initialEstimate, solver); + Optimizer optimizer(graph, initialEstimate, ordering); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started @@ -179,8 +177,7 @@ TEST( Graph, CHECK_ORDERING) // Create an optimizer and check its error // We expect the initial to be zero because config is the ground truth - Optimizer::shared_solver solver(new Optimizer::solver(ordering)); - Optimizer optimizer(graph, initialEstimate, solver); + Optimizer optimizer(graph, initialEstimate, ordering); DOUBLES_EQUAL(0.0, optimizer.error(), 1e-9); // Iterate once, and the config should not have changed because we started diff --git a/tests/testNonlinearEquality.cpp b/tests/testNonlinearEquality.cpp index 3262d8396..43354cd56 100644 --- a/tests/testNonlinearEquality.cpp +++ b/tests/testNonlinearEquality.cpp @@ -197,8 +197,7 @@ TEST ( NonlinearEquality, allow_error_optimize ) { // optimize boost::shared_ptr ord(new Ordering()); ord->push_back(key1); - PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord)); - PoseOptimizer optimizer(graph, init, solver); + PoseOptimizer optimizer(graph, init, ord); double relThresh = 1e-5, absThresh = 1e-5; PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT); @@ -234,8 +233,7 @@ TEST ( NonlinearEquality, allow_error_optimize_with_factors ) { // optimize boost::shared_ptr ord(new Ordering()); ord->push_back(key1); - PoseOptimizer::shared_solver solver(new PoseOptimizer::solver(ord)); - PoseOptimizer optimizer(graph, init, solver); + PoseOptimizer 
optimizer(graph, init, ord); double relThresh = 1e-5, absThresh = 1e-5; PoseOptimizer result = optimizer.levenbergMarquardt(relThresh, absThresh, PoseOptimizer::Parameters::SILENT); diff --git a/tests/testNonlinearOptimizer.cpp b/tests/testNonlinearOptimizer.cpp index 4c1098091..8358da7ad 100644 --- a/tests/testNonlinearOptimizer.cpp +++ b/tests/testNonlinearOptimizer.cpp @@ -69,11 +69,8 @@ TEST( NonlinearOptimizer, linearizeAndOptimizeForDelta ) dx2(1) = -0.2; expected[ord1["x2"]] = dx2; - Optimizer::shared_solver solver; - // Check one ordering - solver = Optimizer::shared_solver(new Optimizer::solver(Ordering::shared_ptr(new Ordering(ord1)))); - Optimizer optimizer1(fg, initial, solver); + Optimizer optimizer1(fg, initial, Optimizer::shared_ordering(new Ordering(ord1))); VectorValues actual1 = optimizer1.linearizeAndOptimizeForDelta(); CHECK(assert_equal(actual1,expected)); @@ -123,8 +120,7 @@ TEST( NonlinearOptimizer, iterateLM ) ord->push_back("x1"); // create initial optimization state, with lambda=0 - Optimizer::shared_solver solver(new Optimizer::solver(ord)); - Optimizer optimizer(fg, config, solver, 0.); + Optimizer optimizer(fg, config, ord, 0.); // normal iterate Optimizer iterated1 = optimizer.iterate(); @@ -169,8 +165,7 @@ TEST( NonlinearOptimizer, optimize ) double absoluteThreshold = 1e-5; // initial optimization state is the same in both cases tested - Optimizer::shared_solver solver(new Optimizer::solver(ord)); - Optimizer optimizer(fg, c0, solver); + Optimizer optimizer(fg, c0, ord); // Gauss-Newton Optimizer actual1 = optimizer.gaussNewton(relativeThreshold, @@ -240,7 +235,7 @@ TEST( NonlinearOptimizer, SimpleGNOptimizer_noshared ) /* ************************************************************************* */ TEST( NonlinearOptimizer, Factorization ) { - typedef NonlinearOptimizer > Optimizer; + typedef NonlinearOptimizer Optimizer; boost::shared_ptr config(new Pose2Values); config->insert(1, Pose2(0.,0.,0.)); @@ -253,9 +248,8 @@ TEST( 
NonlinearOptimizer, Factorization ) boost::shared_ptr ordering(new Ordering); ordering->push_back(Pose2Values::Key(1)); ordering->push_back(Pose2Values::Key(2)); - Optimizer::shared_solver solver(new Factorization(ordering)); - Optimizer optimizer(graph, config, solver); + Optimizer optimizer(graph, config, ordering); Optimizer optimized = optimizer.iterateLM(); Pose2Values expected;