From 509800369193fcb9d02383e5700013ddf8172d9c Mon Sep 17 00:00:00 2001
From: Richard Roberts
Date: Tue, 19 Nov 2013 14:04:52 +0000
Subject: [PATCH] Fixed several warnings

---
 gtsam.h                                     | 16 +++++++--------
 .../tests/testAlgebraicDecisionTree.cpp     |  8 ++++----
 gtsam/discrete/tests/testSignature.cpp      | 20 +++++++++----------
 gtsam/inference/tests/testKey.cpp           |  4 ++--
 gtsam/inference/tests/testLabeledSymbol.cpp |  4 ++--
 gtsam/linear/HessianFactor.cpp              | 16 +++++++--------
 gtsam/linear/JacobianFactor-inl.h           | 12 +++++------
 .../NonlinearConjugateGradientOptimizer.cpp |  2 +-
 .../NonlinearConjugateGradientOptimizer.h   |  4 ++--
 gtsam/nonlinear/NonlinearOptimizer.h        |  4 ++--
 gtsam/nonlinear/NonlinearOptimizerParams.h  |  6 +++---
 gtsam/slam/dataset.cpp                      | 16 +++++++--------
 12 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/gtsam.h b/gtsam.h
index 7fa4b23e5..f7650b228 100644
--- a/gtsam.h
+++ b/gtsam.h
@@ -1445,15 +1445,15 @@ virtual class IterativeOptimizationParameters {
 #include
 virtual class ConjugateGradientParameters : gtsam::IterativeOptimizationParameters {
   ConjugateGradientParameters();
-  size_t getMinIterations() const ;
-  size_t getMaxIterations() const ;
-  size_t getReset() const;
+  int getMinIterations() const ;
+  int getMaxIterations() const ;
+  int getReset() const;
   double getEpsilon_rel() const;
   double getEpsilon_abs() const;

-  void setMinIterations(size_t value);
-  void setMaxIterations(size_t value);
-  void setReset(size_t value);
+  void setMinIterations(int value);
+  void setMaxIterations(int value);
+  void setReset(int value);
   void setEpsilon_rel(double value);
   void setEpsilon_abs(double value);
   void print();
@@ -1764,13 +1764,13 @@ virtual class NonlinearOptimizerParams {
   NonlinearOptimizerParams();
   void print(string s) const;

-  size_t getMaxIterations() const;
+  int getMaxIterations() const;
   double getRelativeErrorTol() const;
   double getAbsoluteErrorTol() const;
   double getErrorTol() const;
   string getVerbosity() const;

-  void setMaxIterations(size_t value);
+  void setMaxIterations(int value);
   void setRelativeErrorTol(double value);
   void setAbsoluteErrorTol(double value);
   void setErrorTol(double value);
diff --git a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
index cf938c901..f8bcb45c2 100644
--- a/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
+++ b/gtsam/discrete/tests/testAlgebraicDecisionTree.cpp
@@ -172,7 +172,7 @@ TEST(ADT, joint)
   dot(joint, "Asia-ASTLBEX");
   joint = apply(joint, pD, &mul);
   dot(joint, "Asia-ASTLBEXD");
-  EXPECT_LONGS_EQUAL(346, muls);
+  EXPECT_LONGS_EQUAL(346, (long)muls);
   printCounts("Asia joint");

   ADT pASTL = pA;
@@ -223,7 +223,7 @@ TEST(ADT, inference)
   dot(joint, "Joint-Product-ASTLBEX");
   joint = apply(joint, pD, &mul);
   dot(joint, "Joint-Product-ASTLBEXD");
-  EXPECT_LONGS_EQUAL(370, muls); // different ordering
+  EXPECT_LONGS_EQUAL(370, (long)muls); // different ordering
   printCounts("Asia product");

   ADT marginal = joint;
@@ -235,7 +235,7 @@ TEST(ADT, inference)
   dot(marginal, "Joint-Sum-ADBLE");
   marginal = marginal.combine(E, &add_);
   dot(marginal, "Joint-Sum-ADBL");
-  EXPECT_LONGS_EQUAL(161, adds);
+  EXPECT_LONGS_EQUAL(161, (long)adds);
   printCounts("Asia sum");
 }

@@ -264,7 +264,7 @@ TEST(ADT, factor_graph)
   fg = apply(fg, pX, &mul);
   fg = apply(fg, pD, &mul);
   dot(fg, "FactorGraph");
-  EXPECT_LONGS_EQUAL(158, muls);
+  EXPECT_LONGS_EQUAL(158, (long)muls);
   printCounts("Asia FG");

   fg = fg.combine(X, &add_);
diff --git a/gtsam/discrete/tests/testSignature.cpp b/gtsam/discrete/tests/testSignature.cpp
index e32f1cf33..de47a00f3 100644
--- a/gtsam/discrete/tests/testSignature.cpp
+++ b/gtsam/discrete/tests/testSignature.cpp
@@ -32,15 +32,15 @@ DiscreteKey X(0,2), Y(1,3), Z(2,2);
 TEST(testSignature, simple_conditional) {
   Signature sig(X | Y = "1/1 2/3 1/4");
   DiscreteKey actKey = sig.key();
-  LONGS_EQUAL(X.first, actKey.first);
+  LONGS_EQUAL((long)X.first, (long)actKey.first);

   DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-  LONGS_EQUAL(2, actKeys.size());
-  LONGS_EQUAL(Y.first, actKeys.front().first);
-  LONGS_EQUAL(X.first, actKeys.back().first);
+  LONGS_EQUAL(2, (long)actKeys.size());
+  LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
+  LONGS_EQUAL((long)X.first, (long)actKeys.back().first);

   vector<double> actCpt = sig.cpt();
-  EXPECT_LONGS_EQUAL(6, actCpt.size());
+  EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
 }

 /* ************************************************************************* */
@@ -54,15 +54,15 @@ TEST(testSignature, simple_conditional_nonparser) {

   Signature sig(X | Y = table);
   DiscreteKey actKey = sig.key();
-  EXPECT_LONGS_EQUAL(X.first, actKey.first);
+  EXPECT_LONGS_EQUAL((long)X.first, (long)actKey.first);

   DiscreteKeys actKeys = sig.discreteKeysParentsFirst();
-  LONGS_EQUAL(2, actKeys.size());
-  LONGS_EQUAL(Y.first, actKeys.front().first);
-  LONGS_EQUAL(X.first, actKeys.back().first);
+  LONGS_EQUAL(2, (long)actKeys.size());
+  LONGS_EQUAL((long)Y.first, (long)actKeys.front().first);
+  LONGS_EQUAL((long)X.first, (long)actKeys.back().first);

   vector<double> actCpt = sig.cpt();
-  EXPECT_LONGS_EQUAL(6, actCpt.size());
+  EXPECT_LONGS_EQUAL(6, (long)actCpt.size());
 }

 /* ************************************************************************* */
diff --git a/gtsam/inference/tests/testKey.cpp b/gtsam/inference/tests/testKey.cpp
index 8eb840cbc..b9735b646 100644
--- a/gtsam/inference/tests/testKey.cpp
+++ b/gtsam/inference/tests/testKey.cpp
@@ -46,7 +46,7 @@ TEST(Key, KeySymbolEncoding) {
     Key key = 0x6100000000000005;
     string str = "a5";

-    EXPECT_LONGS_EQUAL(key, (Key)symbol);
+    EXPECT_LONGS_EQUAL((long)key, (long)(Key)symbol);
     EXPECT(assert_equal(str, DefaultKeyFormatter(symbol)));
     EXPECT(assert_equal(symbol, Symbol(key)));
   } else if(sizeof(Key) == 4) {
@@ -54,7 +54,7 @@ TEST(Key, KeySymbolEncoding) {
     Key key = 0x61000005;
     string str = "a5";

-    EXPECT_LONGS_EQUAL(key, (Key)symbol);
+    EXPECT_LONGS_EQUAL((long)key, (long)(Key)symbol);
     EXPECT(assert_equal(str, DefaultKeyFormatter(symbol)));
     EXPECT(assert_equal(symbol, Symbol(key)));
   }
diff --git a/gtsam/inference/tests/testLabeledSymbol.cpp b/gtsam/inference/tests/testLabeledSymbol.cpp
index f8b785eb7..4e56dfba0 100644
--- a/gtsam/inference/tests/testLabeledSymbol.cpp
+++ b/gtsam/inference/tests/testLabeledSymbol.cpp
@@ -49,7 +49,7 @@ TEST( testLabeledSymbol, KeyLabeledSymbolEncoding ) {
     Key key = 0x7841000000000005;
     string str = "xA5";

-    EXPECT_LONGS_EQUAL(key, (Key)symbol);
+    EXPECT_LONGS_EQUAL((long)key, (long)(Key)symbol);
     EXPECT(assert_equal(str, MultiRobotKeyFormatter(symbol)));
     EXPECT(assert_equal(symbol, LabeledSymbol(key)));
   } else if(sizeof(Key) == 4) {
@@ -57,7 +57,7 @@ TEST( testLabeledSymbol, KeyLabeledSymbolEncoding ) {
     Key key = 0x78410005;
     string str = "xA5";

-    EXPECT_LONGS_EQUAL(key, (Key)symbol);
+    EXPECT_LONGS_EQUAL((long)key, (long)(Key) symbol);
     EXPECT(assert_equal(str, MultiRobotKeyFormatter(symbol)));
     EXPECT(assert_equal(symbol, LabeledSymbol(key)));
   }
diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp
index 1572521f6..404bf50b3 100644
--- a/gtsam/linear/HessianFactor.cpp
+++ b/gtsam/linear/HessianFactor.cpp
@@ -451,13 +451,13 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt

   // Apply updates to the upper triangle
   gttic(update);
-  size_t nrInfoBlocks = this->info_.nBlocks(), nrUpdateBlocks = updateBlocks.nBlocks();
-  for(DenseIndex j2=0; j2<nrUpdateBlocks; ++j2) { // Horizontal block of Hessian
+  DenseIndex nrInfoBlocks = this->info_.nBlocks(), nrUpdateBlocks = updateBlocks.nBlocks();
+  for(DenseIndex j2 = 0; j2 < nrUpdateBlocks; ++j2) { // Horizontal block of Hessian
     DenseIndex slot2 = (j2 == update.size()) ? nrInfoBlocks-1 : slots[j2];
-    assert(slot2>=0 && slot2<=nrInfoBlocks);
-    for(DenseIndex j1=0; j1<=j2; ++j1) { // Vertical block of Hessian
+    assert(slot2 >= 0 && slot2 <= nrInfoBlocks);
+    for(DenseIndex j1 = 0; j1 <= j2; ++j1) { // Vertical block of Hessian
       DenseIndex slot1 = (j1 == update.size()) ? nrInfoBlocks-1 : slots[j1];
-      assert(slot1>=0 && slot1<nrInfoBlocks);
+      assert(slot1 >= 0 && slot1 < nrInfoBlocks);
       updateBlocks.offset(0);
       if(slot2 > slot1)
         info_(slot1, slot2).noalias() += updateBlocks(j1).transpose() * updateBlocks(j2);
@@ -516,7 +516,7 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
   // Accessing the VectorValues one by one is expensive
   // So we will loop over columns to access x only once per column
   // And fill the above temporary y values, to be added into yvalues after
-  for (DenseIndex j = 0; j < size(); ++j) {
+  for (DenseIndex j = 0; j < (DenseIndex)size(); ++j) {
     // xj is the input vector
     Vector xj = x.at(keys_[j]);
     DenseIndex i = 0;
@@ -525,12 +525,12 @@ void HessianFactor::multiplyHessianAdd(double alpha, const VectorValues& x,
       // blocks on the diagonal are only half
       y[i] += info_(j, j).selfadjointView<Eigen::Upper>() * xj;
       // for below diagonal, we take transpose block from upper triangular part
-      for (i=j+1; i < size(); ++i)
+      for (i = j + 1; i < (DenseIndex)size(); ++i)
         y[i] += info_(j, i).transpose() * xj;
   }

   // copy to yvalues
-  for (DenseIndex i = 0; i < size(); ++i) {
+  for(DenseIndex i = 0; i < (DenseIndex)size(); ++i) {
     bool didNotExist;
     VectorValues::iterator it;
     boost::tie(it, didNotExist) = yvalues.tryInsert(keys_[i], Vector());
diff --git a/gtsam/linear/JacobianFactor-inl.h b/gtsam/linear/JacobianFactor-inl.h
index 4632f538f..38e84609a 100644
--- a/gtsam/linear/JacobianFactor-inl.h
+++ b/gtsam/linear/JacobianFactor-inl.h
@@ -61,8 +61,8 @@ namespace gtsam {
   }

   /* ************************************************************************* */
-  namespace {
-    DenseIndex _getColsJF(const std::pair<Key, Matrix>& p) {
+  namespace internal {
+    static DenseIndex getColsJF(const std::pair<Key, Matrix>& p) {
       return p.second.cols();
     }

@@ -71,11 +71,11 @@
     // assignment from boost::cref_list_of, where the term ends up wrapped in a assign_reference
    // that is implicitly convertible to T&. This was introduced to work around a problem where
    // BOOST_FOREACH over terms did not work on GCC.
-    struct _fillTerm {
+    struct fillTerm {
      FastVector<Key>& keys;
      VerticalBlockMatrix& Ab;
      DenseIndex& i;
-      _fillTerm(FastVector<Key>& keys, VerticalBlockMatrix& Ab, DenseIndex& i)
+      fillTerm(FastVector<Key>& keys, VerticalBlockMatrix& Ab, DenseIndex& i)
        : keys(keys), Ab(Ab), i(i) {}

       template
@@ -123,12 +123,12 @@
     // matrices, then extract the number of columns e.g. dimensions in each matrix. Then joins with
     // a single '1' to add a dimension for the b vector.
     {
-      Ab_ = VerticalBlockMatrix(br::join(terms | transformed(&_getColsJF), ListOfOne((DenseIndex)1)), b.size());
+      Ab_ = VerticalBlockMatrix(br::join(terms | transformed(&internal::getColsJF), ListOfOne((DenseIndex)1)), b.size());
     }

     // Check and add terms
     DenseIndex i = 0; // For block index
-    br::for_each(terms, _fillTerm(Base::keys_, Ab_, i));
+    br::for_each(terms, internal::fillTerm(Base::keys_, Ab_, i));

     // Assign RHS vector
     getb() = b;
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
index e5a501b15..107ec7d3f 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.cpp
@@ -38,7 +38,7 @@ NonlinearConjugateGradientOptimizer::System::State NonlinearConjugateGradientOpt
 }

 void NonlinearConjugateGradientOptimizer::iterate() {
-  size_t dummy ;
+  int dummy ;
   boost::tie(state_.values, dummy) = nonlinearConjugateGradient(System(graph_), state_.values, params_, true /* single iterations */);
   ++state_.iterations;
   state_.error = graph_.error(state_.values);
diff --git a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
index 3329d0adf..1ad8fa2ab 100644
--- a/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
+++ b/gtsam/nonlinear/NonlinearConjugateGradientOptimizer.h
@@ -121,11 +121,11 @@ double lineSearch(const S &system, const V currentValues, const W &gradient) {
  * The last parameter is a switch between gradient-descent and conjugate gradient
  */
 template
-boost::tuple<V, size_t> nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent = false) {
+boost::tuple<V, int> nonlinearConjugateGradient(const S &system, const V &initial, const NonlinearOptimizerParams &params, const bool singleIteration, const bool gradientDescent = false) {

   // GTSAM_CONCEPT_MANIFOLD_TYPE(V);

-  Key iteration = 0;
+  int iteration = 0;

   // check if we're already close enough
   double currentError = system.error(initial);
diff --git a/gtsam/nonlinear/NonlinearOptimizer.h b/gtsam/nonlinear/NonlinearOptimizer.h
index 72c2ee0a0..7b9fdba41 100644
--- a/gtsam/nonlinear/NonlinearOptimizer.h
+++ b/gtsam/nonlinear/NonlinearOptimizer.h
@@ -42,7 +42,7 @@ public:
   double error;

   /** The number of optimization iterations performed. */
-  unsigned int iterations;
+  int iterations;

   NonlinearOptimizerState() {}

@@ -166,7 +166,7 @@ public:
   double error() const { return _state().error; }

   /// return number of iterations
-  unsigned int iterations() const { return _state().iterations; }
+  int iterations() const { return _state().iterations; }

   /// return values
   const Values& values() const { return _state().values; }
diff --git a/gtsam/nonlinear/NonlinearOptimizerParams.h b/gtsam/nonlinear/NonlinearOptimizerParams.h
index 42fce2fed..ce5cb1de8 100644
--- a/gtsam/nonlinear/NonlinearOptimizerParams.h
+++ b/gtsam/nonlinear/NonlinearOptimizerParams.h
@@ -37,7 +37,7 @@ public:
     SILENT, ERROR, VALUES, DELTA, LINEAR
   };

-  size_t maxIterations; ///< The maximum iterations to stop iterating (default 100)
+  int maxIterations; ///< The maximum iterations to stop iterating (default 100)
   double relativeErrorTol; ///< The maximum relative error decrease to stop iterating (default 1e-5)
   double absoluteErrorTol; ///< The maximum absolute error decrease to stop iterating (default 1e-5)
   double errorTol; ///< The maximum total error to stop iterating (default 0.0)
@@ -52,7 +52,7 @@ public:
   }

   virtual void print(const std::string& str = "") const;
-  size_t getMaxIterations() const {
+  int getMaxIterations() const {
     return maxIterations;
   }
   double getRelativeErrorTol() const {
@@ -68,7 +68,7 @@ public:
     return verbosityTranslator(verbosity);
   }

-  void setMaxIterations(size_t value) {
+  void setMaxIterations(int value) {
     maxIterations = value;
   }
   void setRelativeErrorTol(double value) {
diff --git a/gtsam/slam/dataset.cpp b/gtsam/slam/dataset.cpp
index 0f9988521..4762373e9 100644
--- a/gtsam/slam/dataset.cpp
+++ b/gtsam/slam/dataset.cpp
@@ -514,9 +514,9 @@ bool readBundler(const string& filename, SfM_data &data)
     // Get the color information
     float r, g, b;
     is >> r >> g >> b;
-    track.r = r/255.0;
-    track.g = g/255.0;
-    track.b = b/255.0;
+    track.r = r/255.f;
+    track.g = g/255.f;
+    track.b = b/255.f;

     // Now get the visibility information
     size_t nvisible = 0;
@@ -593,9 +593,9 @@ bool readBAL(const string& filename, SfM_data &data)
     is >> x >> y >> z;
     SfM_Track& track = data.tracks[j];
     track.p = Point3(x,y,z);
-    track.r = 0.4;
-    track.g = 0.4;
-    track.b = 0.4;
+    track.r = 0.4f;
+    track.g = 0.4f;
+    track.b = 0.4f;
   }

   is.close();
@@ -615,7 +615,7 @@ bool writeBAL(const string& filename, SfM_data &data)
   }

   // Write the number of camera poses and 3D points
-  int nrObservations=0;
+  size_t nrObservations=0;
   for (size_t j = 0; j < data.number_tracks(); j++){
     nrObservations += data.tracks[j].number_measurements();
   }
@@ -628,7 +628,7 @@ bool writeBAL(const string& filename, SfM_data &data)
     SfM_Track track = data.tracks[j];
     for(size_t k = 0; k < track.number_measurements(); k++){ // for each observation of the 3D point j
-      int i = track.measurements[k].first; // camera id
+      size_t i = track.measurements[k].first; // camera id
       double u0 = data.cameras[i].calibration().u0();
       double v0 = data.cameras[i].calibration().v0();