diff --git a/gtsam/base/Matrix.cpp b/gtsam/base/Matrix.cpp
index 050f66ff0..57a45d4a1 100644
--- a/gtsam/base/Matrix.cpp
+++ b/gtsam/base/Matrix.cpp
@@ -26,8 +26,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
diff --git a/gtsam/base/Vector.cpp b/gtsam/base/Vector.cpp
index 2d319961d..98ad43e05 100644
--- a/gtsam/base/Vector.cpp
+++ b/gtsam/base/Vector.cpp
@@ -16,7 +16,7 @@
  * @author Frank Dellaert
  */
 
-#include
+#include
 #include
 #include
 #include
@@ -25,11 +25,7 @@
 #include
 #include
 #include
-#include
-
-#ifdef WIN32
-#include
-#endif
+#include
 #include
 #include
@@ -37,6 +33,10 @@
 #include
 #include
 
+//#ifdef WIN32
+//#include
+//#endif
+
 using namespace std;
 
 boost::minstd_rand generator(42u);
@@ -56,11 +56,11 @@ void odprintf_(const char *format, ostream& stream, ...) {
 #endif
   va_end(args);
 
-#ifdef WIN32
-  OutputDebugString(buf);
-#else
+//#ifdef WIN32
+//  OutputDebugString(buf);
+//#else
   stream << buf;
-#endif
+//#endif
 }
 
 /* ************************************************************************* */
@@ -77,11 +77,11 @@ void odprintf(const char *format, ...) {
 #endif
   va_end(args);
 
-#ifdef WIN32
-  OutputDebugString(buf);
-#else
+//#ifdef WIN32
+//  OutputDebugString(buf);
+//#else
   cout << buf;
-#endif
+//#endif
 }
 
 /* ************************************************************************* */
diff --git a/gtsam/base/Vector.h b/gtsam/base/Vector.h
index 84fd506cb..b4be74f4e 100644
--- a/gtsam/base/Vector.h
+++ b/gtsam/base/Vector.h
@@ -22,6 +22,7 @@
 #include
 #include
+#include
 #include
 #include
diff --git a/gtsam/base/types.h b/gtsam/base/types.h
index 6106f49fd..e9264d516 100644
--- a/gtsam/base/types.h
+++ b/gtsam/base/types.h
@@ -84,4 +84,12 @@ using boost::math::isinf;
 #endif
 
 #endif
+
+#ifdef min
+#undef min
+#endif
+
+#ifdef max
+#undef max
+#endif
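Note on the gtsam/base/types.h hunk just above: on Windows, <windows.h> defines min and max as function-style macros unless NOMINMAX is set, which breaks std::min, std::max, and std::numeric_limits<T>::max() in any header included afterwards; the #undef guard removes them again. A minimal self-contained sketch of the failure mode and the guard, illustrative only and not part of the patch (the #define lines below stand in for what <windows.h> would inject):

#include <algorithm>
#include <limits>
#include <iostream>

// Stand-ins for the macros that <windows.h> defines when NOMINMAX is not set.
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

// With the macros active, std::numeric_limits<int>::max() is parsed as a
// two-argument macro invoked with zero arguments and fails to compile, and
// std::min(1, 2) expands into ill-formed code. Undefining them first, as the
// patch does at the end of gtsam/base/types.h, restores the std functions.
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif

int main() {
  std::cout << std::min(1, 2) << " " << std::numeric_limits<int>::max() << "\n";
  return 0;
}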
diff --git a/gtsam/discrete/Signature.cpp b/gtsam/discrete/Signature.cpp
index ee7c1e59a..439f20864 100644
--- a/gtsam/discrete/Signature.cpp
+++ b/gtsam/discrete/Signature.cpp
@@ -33,6 +33,7 @@ namespace gtsam {
 
 #ifdef BOOST_HAVE_PARSER
   namespace qi = boost::spirit::qi;
+  namespace ph = boost::phoenix;
 
   // parser for strings of form "99/1 80/20" etc...
   namespace parser {
@@ -85,9 +86,9 @@ namespace gtsam {
     // check for OR, AND on whole phrase
     It f = spec.begin(), l = spec.end();
     if (qi::parse(f, l,
-        qi::lit("OR")[ref(table) = logic(false, true, true, true)]) ||
+        qi::lit("OR")[ph::ref(table) = logic(false, true, true, true)]) ||
        qi::parse(f, l,
-        qi::lit("AND")[ref(table) = logic(false, false, false, true)]))
+        qi::lit("AND")[ph::ref(table) = logic(false, false, false, true)]))
      return true;
 
     // tokenize into separate rows
@@ -97,9 +98,9 @@ namespace gtsam {
       Signature::Row values;
       It tf = token.begin(), tl = token.end();
       bool r = qi::parse(tf, tl,
-          qi::double_[push_back(ref(values), qi::_1)] >> +("/" >> qi::double_[push_back(ref(values), qi::_1)]) |
-          qi::lit("T")[ref(values) = T] |
-          qi::lit("F")[ref(values) = F] );
+          qi::double_[push_back(ph::ref(values), qi::_1)] >> +("/" >> qi::double_[push_back(ph::ref(values), qi::_1)]) |
+          qi::lit("T")[ph::ref(values) = T] |
+          qi::lit("F")[ph::ref(values) = F] );
       if (!r) return false;
       table.push_back(values);
diff --git a/gtsam/geometry/Point3.h b/gtsam/geometry/Point3.h
index 9eeb748db..7f7845dfb 100644
--- a/gtsam/geometry/Point3.h
+++ b/gtsam/geometry/Point3.h
@@ -21,6 +21,7 @@
 
 #pragma once
 
+#include
 #include
 #include
@@ -150,7 +151,7 @@ namespace gtsam {
 
     /** distance between two points */
     double dist(const Point3& p2) const {
-      return sqrt(pow(x()-p2.x(),2.0) + pow(y()-p2.y(),2.0) + pow(z()-p2.z(),2.0));
+      return std::sqrt(pow(x()-p2.x(),2.0) + pow(y()-p2.y(),2.0) + pow(z()-p2.z(),2.0));
     }
 
     /** Distance of the point from the origin */
diff --git a/gtsam/inference/BayesTree-inl.h b/gtsam/inference/BayesTree-inl.h
index 95b070739..00cd38609 100644
--- a/gtsam/inference/BayesTree-inl.h
+++ b/gtsam/inference/BayesTree-inl.h
@@ -53,7 +53,7 @@ namespace gtsam {
   template
   void BayesTree::getCliqueData(CliqueData& data,
-      BayesTree::sharedClique clique) const {
+      typename BayesTree::sharedClique clique) const {
     data.conditionalSizes.push_back((*clique)->nrFrontals());
     data.separatorSizes.push_back((*clique)->nrParents());
     BOOST_FOREACH(sharedClique c, clique->children_) {
@@ -74,7 +74,7 @@ namespace gtsam {
   template
   void BayesTree::saveGraph(ostream &s,
-      BayesTree::sharedClique clique,
+      typename BayesTree::sharedClique clique,
       int parentnum) const {
     static int num = 0;
     bool first = true;
diff --git a/gtsam/inference/ClusterTree-inl.h b/gtsam/inference/ClusterTree-inl.h
index 0f1f823af..0a0b9e64e 100644
--- a/gtsam/inference/ClusterTree-inl.h
+++ b/gtsam/inference/ClusterTree-inl.h
@@ -58,7 +58,7 @@ namespace gtsam {
 
   /* ************************************************************************* */
   template
-  bool ClusterTree::Cluster::equals(const ClusterTree::Cluster& other) const {
+  bool ClusterTree::Cluster::equals(const Cluster& other) const {
     if (frontal != other.frontal) return false;
     if (separator != other.separator) return false;
     if (children_.size() != other.children_.size()) return false;
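Note on the typename additions in BayesTree-inl.h above: a qualified name such as BayesTree<CONDITIONAL>::sharedClique depends on a template parameter, so the compiler must be told explicitly that it names a type. (The ph:: qualification added in Signature.cpp serves a similar disambiguation purpose: it pins ref and push_back inside the semantic actions to boost::phoenix, presumably to avoid clashes with boost::ref pulled in by other headers.) A small self-contained illustration of the typename rule, using a hypothetical Tree class rather than GTSAM code:

#include <memory>

// Hypothetical stand-in for a class template with a nested typedef,
// mirroring BayesTree<CONDITIONAL>::sharedClique in spirit only.
template <class T>
struct Tree {
  typedef std::shared_ptr<T> sharedNode;
  void visit(sharedNode node) const;
};

// Out-of-line definition: Tree<T>::sharedNode depends on T, so 'typename'
// is required to mark it as a type; C++03/C++11 compilers must reject the
// declaration without it (C++20 relaxes this in some contexts).
template <class T>
void Tree<T>::visit(typename Tree<T>::sharedNode node) const {
  (void)node;  // body irrelevant here; only the signature matters
}

int main() {
  Tree<int> t;
  t.visit(std::make_shared<int>(42));
  return 0;
}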
diff --git a/gtsam/inference/GenericMultifrontalSolver-inl.h b/gtsam/inference/GenericMultifrontalSolver-inl.h
index 652db02ca..b24db8484 100644
--- a/gtsam/inference/GenericMultifrontalSolver-inl.h
+++ b/gtsam/inference/GenericMultifrontalSolver-inl.h
@@ -49,15 +49,14 @@ namespace gtsam {
   }
 
   /* ************************************************************************* */
-  template
-  typename BayesTree::shared_ptr GenericMultifrontalSolver::eliminate(
-      typename FactorGraph::Eliminate function) const {
+  template
+  typename BayesTree::shared_ptr GenericMultifrontalSolver::eliminate(Eliminate function) const {
 
     // eliminate junction tree, returns pointer to root
-    typename BayesTree::sharedClique root = junctionTree_->eliminate(function);
+    typename BayesTree::sharedClique root = junctionTree_->eliminate(function);
 
     // create an empty Bayes tree and insert root clique
-    typename BayesTree::shared_ptr bayesTree(new BayesTree);
+    typename BayesTree::shared_ptr bayesTree(new BayesTree);
     bayesTree->insert(root);
 
     // return the Bayes tree
diff --git a/gtsam/linear/GaussianConditional.cpp b/gtsam/linear/GaussianConditional.cpp
index de459a00f..c048c8dd8 100644
--- a/gtsam/linear/GaussianConditional.cpp
+++ b/gtsam/linear/GaussianConditional.cpp
@@ -73,7 +73,7 @@ GaussianConditional::GaussianConditional(Index key, const Vector& d, const Matri
     const Matrix& R, const list >& parents, const Vector& sigmas) :
   IndexConditional(key, GetKeys(parents.size(), parents.begin(), parents.end())), rsd_(matrix_), sigmas_(sigmas) {
   assert(R.rows() <= R.cols());
-  size_t dims[1+parents.size()+1];
+  size_t* dims = (size_t*)alloca(sizeof(size_t)*(1+parents.size()+1)); // FIXME: alloca is bad, just ask Google.
   dims[0] = R.cols();
   size_t j=1;
   std::list >::const_iterator parent=parents.begin();
@@ -95,7 +95,7 @@ GaussianConditional::GaussianConditional(const std::list
   Index_Matrix;
   BOOST_FOREACH(const Index_Matrix& term, terms) {
diff --git a/gtsam/linear/HessianFactor.cpp b/gtsam/linear/HessianFactor.cpp
index b1f829005..ebe94514b 100644
--- a/gtsam/linear/HessianFactor.cpp
+++ b/gtsam/linear/HessianFactor.cpp
@@ -188,7 +188,7 @@ HessianFactor::HessianFactor(const std::vector& js, const std::vectorsecond.slot;
@@ -399,7 +399,7 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt
 
   // First build an array of slots
   tic(1, "slots");
-  size_t slots[update.size()];
+  size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
   size_t slot = 0;
   BOOST_FOREACH(Index j, update) {
     slots[slot] = scatter.find(j)->second.slot;
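Note on the alloca changes in GaussianConditional.cpp and HessianFactor.cpp above (and JacobianFactor.cpp below): size_t dims[n] with a runtime n is a C99-style variable-length array, accepted by GCC as an extension but rejected by MSVC, hence the switch to alloca. As the FIXME comments admit, alloca gives no failure indication and can overflow the stack; below is a sketch of the conventional alternative, illustrative only and not what the patch does (buildDims is a made-up helper):

#include <cstddef>
#include <vector>

// Illustrative alternative to the alloca() workaround: std::vector provides a
// runtime-sized scratch array in standard C++, with no reliance on VLAs or on
// unchecked stack allocation.
void buildDims(std::size_t nTerms) {
  // std::size_t dims[nTerms + 1];            // VLA: GCC extension, rejected by MSVC
  std::vector<std::size_t> dims(nTerms + 1);  // heap-backed, valid everywhere
  for (std::size_t j = 0; j < dims.size(); ++j)
    dims[j] = j;                              // stand-in for the real block dimensions
  // dims.data() can then be passed wherever a size_t* was expected
}

int main() {
  buildDims(5);
  return 0;
}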
diff --git a/gtsam/linear/JacobianFactor.cpp b/gtsam/linear/JacobianFactor.cpp
index e2facec40..37fd74c39 100644
--- a/gtsam/linear/JacobianFactor.cpp
+++ b/gtsam/linear/JacobianFactor.cpp
@@ -118,7 +118,7 @@ namespace gtsam {
     GaussianFactor(GetKeys(terms.size(), terms.begin(), terms.end())),
     model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_) {
 
-    size_t dims[terms.size()+1];
+    size_t* dims = (size_t*)alloca(sizeof(size_t)*(terms.size()+1)); // FIXME: alloca is bad, just ask Google.
     for(size_t j=0; j >::const_iterator term=terms.begin();
     for(; term!=terms.end(); ++term,++j)
@@ -488,7 +488,7 @@ namespace gtsam {
       size_t>& varDims, size_t m) {
     keys_.resize(variableSlots.size());
     std::transform(variableSlots.begin(), variableSlots.end(), begin(),
-        bind(&VariableSlots::const_iterator::value_type::first,
+        boost::bind(&VariableSlots::const_iterator::value_type::first,
             boost::lambda::_1));
     varDims.push_back(1);
     Ab_.copyStructureFrom(BlockAb(matrix_, varDims.begin(), varDims.end(), m));
diff --git a/gtsam/linear/NoiseModel.h b/gtsam/linear/NoiseModel.h
index b6cf4deac..87391be0a 100644
--- a/gtsam/linear/NoiseModel.h
+++ b/gtsam/linear/NoiseModel.h
@@ -20,6 +20,7 @@
 
 #include
 #include
+#include
 
 namespace gtsam {
@@ -609,7 +610,7 @@ namespace gtsam {
       virtual bool equals(const Base& expected, const double tol=1e-8) const = 0;
 
       inline double sqrtWeight(const double &error) const
-      { return sqrt(weight(error)); }
+      { return std::sqrt(weight(error)); }
 
       /** produce a weight vector according to an error vector and the implemented
        * robust function */
diff --git a/gtsam/nonlinear/DoglegOptimizerImpl.cpp b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
index 480d76f6a..ccfdf781a 100644
--- a/gtsam/nonlinear/DoglegOptimizerImpl.cpp
+++ b/gtsam/nonlinear/DoglegOptimizerImpl.cpp
@@ -15,6 +15,7 @@
  * @author Richard Roberts
  */
 
+#include
 #include
 
 namespace gtsam {
@@ -27,12 +28,12 @@ VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(
   double DeltaSq = Delta*Delta;
   double x_u_norm_sq = dx_u.vector().squaredNorm();
   double x_n_norm_sq = dx_n.vector().squaredNorm();
-  if(verbose) cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
+  if(verbose) cout << "Steepest descent magnitude " << std::sqrt(x_u_norm_sq) << ", Newton's method magnitude " << std::sqrt(x_n_norm_sq) << endl;
   if(DeltaSq < x_u_norm_sq) {
     // Trust region is smaller than steepest descent update
     VectorValues x_d = VectorValues::SameStructure(dx_u);
-    x_d.vector() = dx_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
-    if(verbose) cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
+    x_d.vector() = dx_u.vector() * std::sqrt(DeltaSq / x_u_norm_sq);
+    if(verbose) cout << "In steepest descent region with fraction " << std::sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
     return x_d;
   } else if(DeltaSq < x_n_norm_sq) {
     // Trust region boundary is between steepest descent point and Newton's method point
@@ -59,7 +60,7 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues&
   const double a = uu - 2.*un + nn;
   const double b = 2. * (un - uu);
   const double c = uu - Delta*Delta;
-  double sqrt_b_m4ac = sqrt(b*b - 4*a*c);
+  double sqrt_b_m4ac = std::sqrt(b*b - 4*a*c);
 
   // Compute blending parameter
   double tau1 = (-b + sqrt_b_m4ac) / (2.*a);
diff --git a/gtsam/nonlinear/ISAM2-impl.cpp b/gtsam/nonlinear/ISAM2-impl.cpp
index 8fe117e34..e8847e5c0 100644
--- a/gtsam/nonlinear/ISAM2-impl.cpp
+++ b/gtsam/nonlinear/ISAM2-impl.cpp
@@ -15,6 +15,7 @@
  * @author Michael Kaess, Richard Roberts
 */
 
+#include
 #include
 #include
 #include
@@ -149,7 +150,7 @@ void ISAM2::Impl::ExpmapMasked(Values& values, const Permuted& del
       cout << " " << keyFormatter(key_value->key) << " (j = " << var << "), delta = " << delta[var].transpose() << endl;
     }
     assert(delta[var].size() == (int)key_value->value.dim());
-    assert(delta[var].unaryExpr(&isfinite).all());
+    assert(delta[var].unaryExpr(ptr_fun(isfinite)).all());
     if(mask[var]) {
       Value* retracted = key_value->value.retract_(delta[var]);
       key_value->value = *retracted;
@@ -305,7 +306,7 @@ size_t ISAM2::Impl::UpdateDelta(const boost::shared_ptr& root, std:
 #ifndef NDEBUG
   for(size_t j=0; j).all());
+    assert(delta.container()[j].unaryExpr(ptr_fun(isfinite)).all());
 #endif
 }
diff --git a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
index 1b19cfe75..b9523b538 100644
--- a/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
+++ b/gtsam/nonlinear/LevenbergMarquardtOptimizer.cpp
@@ -16,6 +16,8 @@
  * @created Feb 26, 2012
 */
 
+#include
+
 #include
 #include // For NegativeMatrixException
 
@@ -48,7 +50,7 @@ void LevenbergMarquardtOptimizer::iterate() {
   // TODO: replace this dampening with a backsubstitution approach
   GaussianFactorGraph dampedSystem(*linear);
   {
-    double sigma = 1.0 / sqrt(state_.lambda);
+    double sigma = 1.0 / std::sqrt(state_.lambda);
     dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
     // for each of the variables, add a prior
     for(Index j=0; j
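Note on the recurring sqrt edits (Point3.h, NoiseModel.h, DoglegOptimizerImpl.cpp, LevenbergMarquardtOptimizer.cpp): each site now includes the math header explicitly and calls std::sqrt, rather than relying on whichever sqrt a transitively included header happens to place in the global namespace, which is exactly what differs between MSVC and GCC. A minimal sketch of the portable pattern (sqrtWeight here is a hypothetical stand-in, not the GTSAM function):

#include <cmath>  // declare std::sqrt explicitly instead of relying on transitive includes

// Qualifying the call selects the std:: overload set (float, double, long
// double) and compiles the same way on every toolchain.
inline double sqrtWeight(double weight) {
  return std::sqrt(weight);
}

int main() {
  return sqrtWeight(4.0) == 2.0 ? 0 : 1;
}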