sync with develop branch commit c1f048d

release/4.3a0
krunalchande 2014-12-12 18:46:54 -05:00
parent 9df5ce9732
commit da318184ae
24 changed files with 636 additions and 731 deletions

.gitignore
View File

@ -1,4 +1,5 @@
/build*
/doc*
*.pyc
*.DS_Store
/examples/Data/dubrovnik-3-7-pre-rewritten.txt

View File

@ -6,7 +6,7 @@
get_filename_component(OUR_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH)
if(EXISTS "${OUR_CMAKE_DIR}/CMakeCache.txt")
# In build tree
set(@PACKAGE_NAME@_INCLUDE_DIR @CMAKE_SOURCE_DIR@ @CMAKE_BINARY_DIR@ CACHE PATH "@PACKAGE_NAME@ include directory")
set(@PACKAGE_NAME@_INCLUDE_DIR @CMAKE_SOURCE_DIR@ CACHE PATH "@PACKAGE_NAME@ include directory")
else()
# Find installed library
set(@PACKAGE_NAME@_INCLUDE_DIR "${OUR_CMAKE_DIR}/@CONF_REL_INCLUDE_DIR@" CACHE PATH "@PACKAGE_NAME@ include directory")

gtsam.h
View File

@ -1250,14 +1250,6 @@ virtual class JacobianFactor : gtsam::GaussianFactor {
const gtsam::noiseModel::Diagonal* model);
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
Vector b, const gtsam::noiseModel::Diagonal* model);
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
size_t i4, Matrix A4, Vector b, const gtsam::noiseModel::Diagonal* model);
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
size_t i4, Matrix A4, size_t i5, Matrix A5, Vector b,
const gtsam::noiseModel::Diagonal* model);
JacobianFactor(size_t i1, Matrix A1, size_t i2, Matrix A2, size_t i3, Matrix A3,
size_t i4, Matrix A4, size_t i5, Matrix A5, size_t i6, Matrix A6, Vector b,
const gtsam::noiseModel::Diagonal* model);
JacobianFactor(const gtsam::GaussianFactorGraph& graph);
//Testable
@ -1664,7 +1656,6 @@ class NonlinearFactorGraph {
gtsam::Ordering orderingCOLAMD() const;
// Ordering* orderingCOLAMDConstrained(const gtsam::Values& c, const std::map<gtsam::Key,int>& constraints) const;
gtsam::GaussianFactorGraph* linearize(const gtsam::Values& values) const;
gtsam::GaussianFactorGraph* multipliedHessians(const gtsam::Values& values, const gtsam::VectorValues& duals) const;
gtsam::NonlinearFactorGraph clone() const;
// enabling serialization functionality
@ -1675,7 +1666,6 @@ virtual class NonlinearFactor {
// Factor base class
size_t size() const;
gtsam::KeyVector keys() const;
size_t dualKey() const;
void print(string s) const;
void printKeys(string s) const;
// NonlinearFactor

View File

@ -4,11 +4,20 @@ project(METIS)
# Add flags for current directory and below
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
add_definitions(-Wno-unused-variable)
# add_definitions(-Wno-sometimes-uninitialized)
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 5.0 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 5.0)
add_definitions(-Wno-sometimes-uninitialized)
endif()
endif()
add_definitions(-Wno-unknown-pragmas)
#add_definitions(-Wunused-but-set-variable)
if(NOT ("${CMAKE_C_COMPILER_ID}" MATCHES "MSVC" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "MSVC"))
#add_definitions(-Wno-unknown-pragmas)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.6 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.6)
add_definitions(-Wno-unused-but-set-variable)
endif()
endif()
set(GKLIB_PATH ${PROJECT_SOURCE_DIR}/GKlib CACHE PATH "path to GKlib")
set(SHARED FALSE CACHE BOOL "build a shared library")

View File

@ -59,9 +59,10 @@ typedef ptrdiff_t ssize_t;
#endif
#ifdef __MSC__
#if(_MSC_VER < 1700)
/* MSC does not have rint() function */
#define rint(x) ((int)((x)+0.5))
#endif
/* MSC does not have INFINITY defined */
#ifndef INFINITY
#define INFINITY FLT_MAX

View File

@ -543,8 +543,7 @@ Matrix collect(size_t nrMatrices, ...)
void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask) {
const DenseIndex m = A.rows();
if (inf_mask) {
// only scale the first v.size() rows of A to support augmented Matrix
for (DenseIndex i=0; i<v.size(); ++i) {
for (DenseIndex i=0; i<m; ++i) {
const double& vi = v(i);
if (std::isfinite(vi))
A.row(i) *= vi;

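The hunk above makes the in-place row scaling iterate over all of A's rows when inf_mask is set, instead of only the first v.size() rows (the matching doc sentence is removed in the header hunk below), so v is now expected to match A's row count. A minimal stand-alone sketch of that row-scaling behaviour in plain Eigen; scaleRowsInPlace is a hypothetical helper, not GTSAM's vector_scale_inplace:

#include <cmath>
#include <iostream>
#include <limits>
#include <Eigen/Dense>

// Scale row i of A by v(i); when inf_mask is set, rows whose v(i) is not finite are left untouched.
void scaleRowsInPlace(const Eigen::VectorXd& v, Eigen::MatrixXd& A, bool inf_mask) {
  for (Eigen::DenseIndex i = 0; i < A.rows(); ++i) {
    const double vi = v(i);
    if (!inf_mask || std::isfinite(vi))
      A.row(i) *= vi;
  }
}

int main() {
  Eigen::MatrixXd A(2, 2);
  A << 1, 2,
       3, 4;
  Eigen::VectorXd v(2);
  v << 10, std::numeric_limits<double>::infinity();
  scaleRowsInPlace(v, A, true);  // first row scaled by 10, infinite entry skipped
  std::cout << A << "\n";        // prints: 10 20 / 3 4
}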
View File

@ -463,7 +463,6 @@ GTSAM_EXPORT Matrix collect(size_t nrMatrices, ...);
* Arguments (Matrix, Vector) scales the columns,
* (Vector, Matrix) scales the rows
* @param inf_mask when true, will not scale with a NaN or inf value.
* The inplace version also allows v.size()<A.rows() and only scales the first v.size() rows of A.
*/
GTSAM_EXPORT void vector_scale_inplace(const Vector& v, Matrix& A, bool inf_mask = false); // row
GTSAM_EXPORT Matrix vector_scale(const Vector& v, const Matrix& A, bool inf_mask = false); // row

View File

@ -86,7 +86,7 @@ namespace gtsam {
* This template works for any type with equals
*/
template<class V>
bool assert_equal(const V& expected, const V& actual, double tol = 1e-8) {
bool assert_equal(const V& expected, const V& actual, double tol = 1e-9) {
if (actual.equals(expected, tol))
return true;
printf("Not equal:\n");

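The hunk above only flips the default tolerance of the templated assert_equal between 1e-8 and 1e-9. For illustration, a toy tolerance-based comparison in the same spirit; approxEqual is a made-up stand-in, not GTSAM's assert_equal:

#include <cmath>
#include <cstdio>

// Compare two doubles within a tolerance; callers may override the default at the call site.
static bool approxEqual(double expected, double actual, double tol = 1e-9) {
  return std::fabs(expected - actual) < tol;
}

int main() {
  std::printf("%d\n", approxEqual(1.0, 1.0 + 5e-10));         // 1: inside the default tolerance
  std::printf("%d\n", approxEqual(1.0, 1.0 + 5e-10, 1e-12));  // 0: a tighter tolerance rejects it
}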
View File

@ -281,17 +281,12 @@ double weightedPseudoinverse(const Vector& a, const Vector& weights,
size_t m = weights.size();
static const double inf = std::numeric_limits<double>::infinity();
// Check once for zero entries of a.
// Check once for zero entries of a. TODO: really needed ?
vector<bool> isZero;
for (size_t i = 0; i < m; ++i) {
isZero.push_back(fabs(a[i]) < 1e-9);
// If there is a valid (a[i]!=0) inequality constraint (weight<0),
// ignore it by also setting isZero flag
if (!isZero[i] && (weights[i]<0)) isZero[i] = true;
}
for (size_t i = 0; i < m; ++i) isZero.push_back(fabs(a[i]) < 1e-9);
for (size_t i = 0; i < m; ++i) {
// If there is a valid (a!=0) constraint (sigma==0) return the first one
for (size_t i = 0; i < m; ++i) {
if (weights[i] == inf && !isZero[i]) {
// Basically, instead of doing a normal QR step with the weighted
// pseudoinverse, we enforce the constraint by turning

View File

@ -262,6 +262,7 @@ namespace gtsam {
/**
* Compute the gradient of the energy function, \f$ \nabla_{x=0} \left\Vert \Sigma^{-1} A x - b
* \right\Vert^2 \f$, centered around zero. The gradient is \f$ A^T(Ax-b) \f$.
* @param fg The Jacobian factor graph $(A,b)$
* @param [output] g A VectorValues to store the gradient, which must be preallocated,
* see allocateVectorValues
* @return The gradient as a VectorValues */
@ -321,12 +322,6 @@ namespace gtsam {
/// @}
/**
* Split constraints and unconstrained factors into two different graphs
* @return a pair of <unconstrained, constrained> graphs
*/
boost::tuple<GaussianFactorGraph, GaussianFactorGraph, GaussianFactorGraph> splitConstraints() const;
private:
/** Serialization function */
friend class boost::serialization::access;

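Side note on the documented formula above: at x = 0 the gradient A^T(Ax - b) reduces to -A^T b for a whitened system. A toy numeric check in plain Eigen, assuming unit noise and bypassing the GaussianFactorGraph API:

#include <iostream>
#include <Eigen/Dense>

int main() {
  // Toy whitened least-squares system ||A x - b||^2
  Eigen::MatrixXd A(2, 2);
  A << 1, 0,
       0, 2;
  Eigen::VectorXd b(2);
  b << 3, 4;
  // Documented gradient A^T (A x - b), evaluated at x = 0
  Eigen::VectorXd g = A.transpose() * (A * Eigen::VectorXd::Zero(2) - b);
  std::cout << g.transpose() << "\n";  // prints: -3 -8
}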
View File

@ -650,7 +650,6 @@ EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
HessianFactor::shared_ptr jointFactor;
try {
jointFactor = boost::make_shared<HessianFactor>(factors, Scatter(factors, keys));
// jointFactor->print("jointFactor: ");
} catch(std::invalid_argument&) {
throw InvalidDenseElimination(
"EliminateCholesky was called with a request to eliminate variables that are not\n"
@ -666,7 +665,6 @@ EliminateCholesky(const GaussianFactorGraph& factors, const Ordering& keys)
// Erase the eliminated keys in the remaining factor
jointFactor->keys_.erase(jointFactor->begin(), jointFactor->begin() + numberOfKeysToEliminate);
} catch(CholeskyFailed&) {
// std::cout << "Problematic Hessian: " << jointFactor->information() << std::endl;
throw IndeterminantLinearSystemException(keys.front());
}

View File

@ -384,10 +384,7 @@ namespace gtsam {
void multiplyHessianAdd(double alpha, const double* x, double* y) const {};
/**
* eta for Hessian
* Ignore duals parameters. It's only valid for constraints, which need to be a JacobianFactor
*/
/// eta for Hessian
VectorValues gradientAtZero() const;
virtual void gradientAtZero(double* d) const;

View File

@ -19,25 +19,22 @@
#pragma once
#include <gtsam/linear/linearExceptions.h>
#include <boost/assign/list_of.hpp>
#include <boost/range/adaptor/transformed.hpp>
#include <boost/range/algorithm/for_each.hpp>
#include <boost/foreach.hpp>
namespace gtsam {
/* ************************************************************************* */
template<typename TERMS>
JacobianFactor::JacobianFactor(const TERMS&terms, const Vector &b,
const SharedDiagonal& model) {
JacobianFactor::JacobianFactor(const TERMS&terms, const Vector &b, const SharedDiagonal& model)
{
fillTerms(terms, b, model);
}
/* ************************************************************************* */
template<typename KEYS>
JacobianFactor::JacobianFactor(const KEYS& keys,
const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& model) :
Base(keys), Ab_(augmentedMatrix) {
JacobianFactor::JacobianFactor(
const KEYS& keys, const VerticalBlockMatrix& augmentedMatrix, const SharedDiagonal& model) :
Base(keys), Ab_(augmentedMatrix)
{
// Check noise model dimension
if(model && (DenseIndex)model->dim() != augmentedMatrix.rows())
throw InvalidNoiseModel(augmentedMatrix.rows(), model->dim());
@ -58,20 +55,10 @@ JacobianFactor::JacobianFactor(const KEYS& keys,
model_ = model;
}
/* ************************************************************************* */
namespace internal {
static inline DenseIndex getColsJF(const std::pair<Key, Matrix>& p) {
return p.second.cols();
}
}
/* ************************************************************************* */
template<typename TERMS>
void JacobianFactor::fillTerms(const TERMS& terms, const Vector& b,
const SharedDiagonal& noiseModel) {
using boost::adaptors::transformed;
namespace br = boost::range;
void JacobianFactor::fillTerms(const TERMS& terms, const Vector& b, const SharedDiagonal& noiseModel)
{
// Check noise model dimension
if(noiseModel && (DenseIndex)noiseModel->dim() != b.size())
throw InvalidNoiseModel(b.size(), noiseModel->dim());
@ -79,27 +66,32 @@ void JacobianFactor::fillTerms(const TERMS& terms, const Vector& b,
// Resize base class key vector
Base::keys_.resize(terms.size());
// Construct block matrix - this uses the boost::range 'transformed' construct to apply
// internal::getColsJF (defined just above here in this file) to each term. This
// transforms the list of terms into a list of variable dimensions, by extracting the
// number of columns in each matrix. This is done to avoid separately allocating an
// array of dimensions before constructing the VerticalBlockMatrix.
Ab_ = VerticalBlockMatrix(terms | transformed(&internal::getColsJF), b.size(),
true);
// Get dimensions of matrices
std::vector<size_t> dimensions;
dimensions.reserve(terms.size());
for(typename TERMS::const_iterator it = terms.begin(); it != terms.end(); ++it) {
const std::pair<Key, Matrix>& term = *it;
const Matrix& Ai = term.second;
dimensions.push_back(Ai.cols());
}
// Construct block matrix
Ab_ = VerticalBlockMatrix(dimensions, b.size(), true);
// Check and add terms
DenseIndex i = 0; // For block index
for (typename TERMS::const_iterator termIt = terms.begin();
termIt != terms.end(); ++termIt) {
const std::pair<Key, Matrix>& term = *termIt;
for(typename TERMS::const_iterator it = terms.begin(); it != terms.end(); ++it) {
const std::pair<Key, Matrix>& term = *it;
Key key = term.first;
const Matrix& Ai = term.second;
// Check block rows
if (term.second.rows() != Ab_.rows())
throw InvalidMatrixBlock(Ab_.rows(), term.second.rows());
if(Ai.rows() != Ab_.rows())
throw InvalidMatrixBlock(Ab_.rows(), Ai.rows());
// Assign key and matrix
Base::keys_[i] = term.first;
Ab_(i) = term.second;
Base::keys_[i] = key;
Ab_(i) = Ai;
// Increment block index
++ i;

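The rewritten fillTerms above swaps the boost::range 'transformed' adaptor (and the removed internal::getColsJF helper) for a plain loop that collects each block's column count before building the VerticalBlockMatrix. A self-contained sketch of that dimension-gathering pattern, using simplified (key, matrix) pairs rather than GTSAM's types:

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>
#include <Eigen/Dense>

int main() {
  typedef std::pair<std::size_t, Eigen::MatrixXd> Term;  // (key, block) pair
  std::vector<Term> terms;
  terms.push_back(Term(1, Eigen::MatrixXd::Zero(3, 2)));
  terms.push_back(Term(2, Eigen::MatrixXd::Zero(3, 4)));

  // Gather the column count of every block up front, instead of piping the terms
  // through boost::adaptors::transformed with a getColsJF-style helper.
  std::vector<std::size_t> dimensions;
  dimensions.reserve(terms.size());
  for (std::vector<Term>::const_iterator it = terms.begin(); it != terms.end(); ++it)
    dimensions.push_back(it->second.cols());

  std::cout << dimensions[0] << " " << dimensions[1] << "\n";  // prints: 2 4
}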
View File

@ -66,15 +66,6 @@ namespace gtsam {
return result;
}
/* ************************************************************************* */
VectorValues VectorValues::One(const VectorValues& other)
{
VectorValues result;
BOOST_FOREACH(const KeyValuePair& v, other)
result.values_.insert(make_pair(v.first, Vector::Ones(v.second.size())));
return result;
}
/* ************************************************************************* */
void VectorValues::update(const VectorValues& values)
{
@ -316,22 +307,6 @@ namespace gtsam {
return result;
}
/* ************************************************************************* */
VectorValues VectorValues::operator*(const VectorValues& c) const
{
if(this->size() != c.size())
throw invalid_argument("VectorValues::operator* called with different vector sizes");
assert_throw(hasSameStructure(c),
invalid_argument("VectorValues::operator* called with different vector sizes"));
VectorValues result;
// The result.end() hint here should result in constant-time inserts
for(const_iterator j1 = begin(), j2 = c.begin(); j1 != end(); ++j1, ++j2)
result.values_.insert(result.end(), make_pair(j1->first, j1->second * j2->second));
return result;
}
/* ************************************************************************* */
VectorValues VectorValues::subtract(const VectorValues& c) const
{

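For reference, the operator* removed above computed an element-wise (per-key) product of two structurally identical VectorValues. A simplified sketch of that idea over a plain std::map; SimpleVectorValues and elementWiseProduct are made-up names, not GTSAM API:

#include <cassert>
#include <cstddef>
#include <iostream>
#include <map>
#include <Eigen/Dense>

typedef std::map<std::size_t, Eigen::VectorXd> SimpleVectorValues;

// Per-key coefficient-wise product; both maps must share the same keys and dimensions.
SimpleVectorValues elementWiseProduct(const SimpleVectorValues& a, const SimpleVectorValues& b) {
  assert(a.size() == b.size());
  SimpleVectorValues result;
  SimpleVectorValues::const_iterator j1 = a.begin(), j2 = b.begin();
  for (; j1 != a.end(); ++j1, ++j2)
    result[j1->first] = j1->second.cwiseProduct(j2->second);
  return result;
}

int main() {
  SimpleVectorValues a, b;
  a[0] = Eigen::Vector2d(1, 2);
  b[0] = Eigen::Vector2d(3, 4);
  std::cout << elementWiseProduct(a, b)[0].transpose() << "\n";  // prints: 3 8
}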
View File

@ -130,15 +130,12 @@ namespace gtsam {
/** Create a VectorValues with the same structure as \c other, but filled with zeros. */
static VectorValues Zero(const VectorValues& other);
/** Create a VectorValues with the same structure as \c other, but filled with a constant. */
static VectorValues One(const VectorValues& other);
/// @}
/// @name Standard Interface
/// @{
/** Number of variables stored. */
size_t size() const { return values_.size(); }
Key size() const { return values_.size(); }
/** Return the dimension of variable \c j. */
size_t dim(Key j) const { return at(j).rows(); }
@ -305,10 +302,6 @@ namespace gtsam {
* structure (checked when NDEBUG is not defined). */
VectorValues subtract(const VectorValues& c) const;
/** Element-wise multiplication. Both VectorValues must have the same structure
* (checked when NDEBUG is not defined). */
VectorValues operator*(const VectorValues& c) const;
/** Element-wise scaling by a constant. */
friend GTSAM_EXPORT VectorValues operator*(const double a, const VectorValues &v);

View File

@ -105,6 +105,24 @@ TEST(JacobianFactor, constructors_and_accessors)
EXPECT(noise == expected.get_model());
EXPECT(noise == actual.get_model());
}
{
// Test three-term constructor with std::map
JacobianFactor expected(
boost::make_iterator_range(terms.begin(), terms.begin() + 3), b, noise);
map<Key,Matrix> mapTerms;
// note order of insertion plays no role: order will be determined by keys
mapTerms.insert(terms[2]);
mapTerms.insert(terms[1]);
mapTerms.insert(terms[0]);
JacobianFactor actual(mapTerms, b, noise);
EXPECT(assert_equal(expected, actual));
LONGS_EQUAL((long)terms[2].first, (long)actual.keys().back());
EXPECT(assert_equal(terms[2].second, actual.getA(actual.end() - 1)));
EXPECT(assert_equal(b, expected.getb()));
EXPECT(assert_equal(b, actual.getb()));
EXPECT(noise == expected.get_model());
EXPECT(noise == actual.get_model());
}
{
// VerticalBlockMatrix constructor
JacobianFactor expected(
@ -576,7 +594,7 @@ TEST ( JacobianFactor, constraint_eliminate2 )
Matrix m(1,2);
Matrix Aempty = m.topRows(0);
Vector bempty = m.block(0,0,0,1);
JacobianFactor expectedLF(2, Aempty, bempty, noiseModel::Diagonal::Sigmas(Vector()));
JacobianFactor expectedLF(2, Aempty, bempty, noiseModel::Constrained::All(0));
EXPECT(assert_equal(expectedLF, *actual.second));
// verify CG

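The new std::map-based constructor test above relies on std::map iterating its entries in key order regardless of insertion order, which is why terms[2].first is expected as the last key. A minimal illustration in generic C++, unrelated to GTSAM types:

#include <cstdio>
#include <map>
#include <string>

int main() {
  // std::map keeps entries sorted by key, not by insertion order.
  std::map<int, std::string> m;
  m.insert(std::make_pair(7, "inserted first"));
  m.insert(std::make_pair(3, "inserted second"));
  m.insert(std::make_pair(1, "inserted third"));
  for (std::map<int, std::string>::const_iterator it = m.begin(); it != m.end(); ++it)
    std::printf("%d -> %s\n", it->first, it->second.c_str());  // prints keys 1, 3, 7
}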
View File

@ -48,15 +48,6 @@ class Dummy {
unsigned char dummyTwoVar(unsigned char a) const;
};
#include <gtsam_unstable/linear/QPSolver.h>
class QPSolver {
QPSolver(const gtsam::GaussianFactorGraph &graph);
pair<gtsam::VectorValues, gtsam::VectorValues> optimize(const gtsam::VectorValues& initials) const;
pair<gtsam::VectorValues, gtsam::VectorValues> optimize() const;
pair<bool, gtsam::VectorValues> findFeasibleInitialValues() const;
};
#include <gtsam_unstable/dynamics/PoseRTV.h>
class PoseRTV {
PoseRTV();

View File

@ -121,3 +121,4 @@ int main()
TestResult tr; return TestRegistry::runAllTests(tr);
}
/* ************************************************************************* */

View File

@ -593,55 +593,6 @@ TEST( GaussianFactorGraph, conditional_sigma_failure) {
}
}
///* ************************************************************************* */
//TEST( GaussianFactorGraph, buildDualGraph1 )
//{
// GaussianFactorGraph fgc1 = createSimpleConstraintGraph();
// KeySet constrainedVariables1 = list_of(0)(1);
// VariableIndex variableIndex1(fgc1);
// VectorValues delta1 = createSimpleConstraintValues();
// GaussianFactorGraph::shared_ptr dualGraph1 = fgc1.buildDualGraph(
// constrainedVariables1, variableIndex1, delta1);
// GaussianFactorGraph expectedDualGraph1;
// expectedDualGraph1.push_back(JacobianFactor(3, eye(2), zero(2)));
// expectedDualGraph1.push_back(JacobianFactor(3, -eye(2), zero(2)));
// EXPECT(assert_equal(expectedDualGraph1, *dualGraph1));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, buildDualGraph2 )
//{
// GaussianFactorGraph fgc2 = createSingleConstraintGraph();
// KeySet constrainedVariables2 = list_of(0)(1);
// VariableIndex variableIndex2(fgc2);
// VectorValues delta2 = createSingleConstraintValues();
// GaussianFactorGraph::shared_ptr dualGraph2 = fgc2.buildDualGraph(
// constrainedVariables2, variableIndex2, delta2);
// GaussianFactorGraph expectedDualGraph2;
// expectedDualGraph2.push_back(JacobianFactor(3, (Matrix(2,2) << 1,2,2,1), zero(2)));
// expectedDualGraph2.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
// EXPECT(assert_equal(expectedDualGraph2, *dualGraph2));
//}
//
///* ************************************************************************* */
//TEST( GaussianFactorGraph, buildDualGraph3 )
//{
// GaussianFactorGraph fgc3 = createMultiConstraintGraph();
// KeySet constrainedVariables3 = list_of(0)(1)(2);
// VariableIndex variableIndex3(fgc3);
// VectorValues delta3 = createMultiConstraintValues();
// GaussianFactorGraph::shared_ptr dualGraph3 = fgc3.buildDualGraph(
// constrainedVariables3, variableIndex3, delta3);
// GaussianFactorGraph expectedDualGraph3;
// expectedDualGraph3.push_back(
// JacobianFactor(3, (Matrix(2, 2) << 1, 2, 2, 1), 4,
// (Matrix(2, 2) << 3, -1, 4, -2), zero(2)));
// expectedDualGraph3.push_back(JacobianFactor(3, 10*eye(2), zero(2)));
// expectedDualGraph3.push_back(
// JacobianFactor(4, (Matrix(2, 2) << 1, 1, 1, 2), zero(2)));
// EXPECT(assert_equal(expectedDualGraph3, *dualGraph3));
//}
/* ************************************************************************* */
int main() { TestResult tr; return TestRegistry::runAllTests(tr);}
/* ************************************************************************* */

View File

@ -273,11 +273,11 @@ TEST_UNSAFE(NonlinearOptimizer, MoreOptimization) {
// test convergence
Values actual = optimizer.optimize();
EXPECT(assert_equal(expected, actual, 1e-6));
EXPECT(assert_equal(expected, actual));
// Check that the gradient is zero
GaussianFactorGraph::shared_ptr linear = optimizer.linearize();
EXPECT(assert_equal(expectedGradient,linear->gradientAtZero(), 1e-7));
EXPECT(assert_equal(expectedGradient,linear->gradientAtZero()));
}
EXPECT(assert_equal(expected, DoglegOptimizer(fg, init).optimize()));