More progress in compiling on Windows

release/4.3a0
Richard Roberts 2012-05-22 22:52:17 +00:00
parent 25a53815e0
commit 510e2eacac
17 changed files with 62 additions and 47 deletions

View File

@@ -26,8 +26,8 @@
#include <boost/foreach.hpp>
#include <boost/tuple/tuple.hpp>
#include <stdarg.h>
#include <string.h>
#include <cstdarg>
#include <cstring>
#include <iomanip>
#include <list>
#include <fstream>
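
Note on the include swap above: <cstdarg> and <cstring> are the C++ spellings of the C headers; they guarantee the declarations in namespace std, while the old <name.h> forms are deprecated in C++ and compilers differ in practice over which names additionally land in the global namespace. The same motivation presumably drives the later sqrt -> std::sqrt edits in this commit. A minimal sketch of the portable spelling, not taken from the commit:

    #include <cstdarg>
    #include <cstdio>
    #include <cstring>

    // Every C-library name is reached through the std:: qualification that
    // the <c...> headers are required to provide.
    void format_into(char* buf, std::size_t n, const char* fmt, ...) {
        std::memset(buf, 0, n);
        std::va_list args;
        va_start(args, fmt);                  // va_start/va_end remain macros
        std::vsnprintf(buf, n, fmt, args);    // std::vsnprintf is the C++11 spelling
        va_end(args);
    }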

View File

@@ -16,7 +16,7 @@
* @author Frank Dellaert
*/
#include <stdarg.h>
#include <cstdarg>
#include <limits>
#include <iostream>
#include <fstream>
@@ -25,11 +25,7 @@
#include <cmath>
#include <boost/foreach.hpp>
#include <boost/optional.hpp>
#include <stdio.h>
#ifdef WIN32
#include <Windows.h>
#endif
#include <cstdio>
#include <boost/random/normal_distribution.hpp>
#include <boost/random/variate_generator.hpp>
@@ -37,6 +33,10 @@
#include <gtsam/base/Vector.h>
#include <gtsam/base/types.h>
//#ifdef WIN32
//#include <Windows.h>
//#endif
using namespace std;
boost::minstd_rand generator(42u);
@@ -56,11 +56,11 @@ void odprintf_(const char *format, ostream& stream, ...) {
#endif
va_end(args);
#ifdef WIN32
OutputDebugString(buf);
#else
//#ifdef WIN32
// OutputDebugString(buf);
//#else
stream << buf;
#endif
//#endif
}
/* ************************************************************************* */
@@ -77,11 +77,11 @@ void odprintf(const char *format, ...) {
#endif
va_end(args);
#ifdef WIN32
OutputDebugString(buf);
#else
//#ifdef WIN32
// OutputDebugString(buf);
//#else
cout << buf;
#endif
//#endif
}
/* ************************************************************************* */

View File

@@ -22,6 +22,7 @@
#include <list>
#include <vector>
#include <gtsam/base/types.h>
#include <gtsam/3rdparty/Eigen/Eigen/Core>
#include <boost/random/linear_congruential.hpp>

View File

@@ -84,4 +84,12 @@ using boost::math::isinf;
#endif
#endif
#ifdef min
#undef min
#endif
#ifdef max
#undef max
#endif
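
The #undef block above exists because <windows.h> defines min and max as function-style macros, which mangles otherwise ordinary expressions such as std::min(a, b) or std::numeric_limits<T>::max(). A short sketch of the two usual workarounds, assuming a Windows build (not part of the commit):

    #define NOMINMAX              // option 1: suppress the macros before
    #include <windows.h>          //           <windows.h> is ever included
    #include <algorithm>
    #include <limits>

    int headroom(int used) {
        // Option 2: extra parentheses keep a function-style macro from
        // expanding, as in (std::max)(a, b) and (std::numeric_limits<int>::max)().
        return (std::max)((std::numeric_limits<int>::max)() - used, 0);
    }

Undefining the macros after the fact, as the hunk above does, is the third option when the header that drags in <windows.h> cannot be controlled.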

View File

@@ -33,6 +33,7 @@ namespace gtsam {
#ifdef BOOST_HAVE_PARSER
namespace qi = boost::spirit::qi;
namespace ph = boost::phoenix;
// parser for strings of form "99/1 80/20" etc...
namespace parser {
@@ -85,9 +86,9 @@ namespace gtsam {
// check for OR, AND on whole phrase
It f = spec.begin(), l = spec.end();
if (qi::parse(f, l,
qi::lit("OR")[ref(table) = logic(false, true, true, true)]) ||
qi::lit("OR")[ph::ref(table) = logic(false, true, true, true)]) ||
qi::parse(f, l,
qi::lit("AND")[ref(table) = logic(false, false, false, true)]))
qi::lit("AND")[ph::ref(table) = logic(false, false, false, true)]))
return true;
// tokenize into separate rows
@@ -97,9 +98,9 @@ namespace gtsam {
Signature::Row values;
It tf = token.begin(), tl = token.end();
bool r = qi::parse(tf, tl,
qi::double_[push_back(ref(values), qi::_1)] >> +("/" >> qi::double_[push_back(ref(values), qi::_1)]) |
qi::lit("T")[ref(values) = T] |
qi::lit("F")[ref(values) = F] );
qi::double_[push_back(ph::ref(values), qi::_1)] >> +("/" >> qi::double_[push_back(ph::ref(values), qi::_1)]) |
qi::lit("T")[ph::ref(values) = T] |
qi::lit("F")[ph::ref(values) = F] );
if (!r)
return false;
table.push_back(values);
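
The Signature parser changes qualify every ref as ph::ref, i.e. boost::phoenix::ref. With Spirit, Phoenix and other Boost utilities visible in the same translation unit, an unqualified ref can resolve to boost::ref (or std::ref in C++11 modes) rather than the Phoenix actor the semantic action needs, and compilers disagree on how to break the tie, so the explicit namespace removes the ambiguity. A small self-contained sketch of the same idiom (an illustration, not code from the commit):

    #include <boost/spirit/include/qi.hpp>
    #include <boost/spirit/include/phoenix.hpp>
    #include <iostream>
    #include <string>

    namespace qi = boost::spirit::qi;
    namespace ph = boost::phoenix;

    int main() {
        std::string input = "3/7";
        double num = 0, den = 0;
        std::string::const_iterator f = input.begin(), l = input.end();
        // ph::ref builds a Phoenix reference actor; spelling the namespace
        // keeps it from colliding with boost::ref pulled in elsewhere.
        bool ok = qi::parse(f, l,
            qi::double_[ph::ref(num) = qi::_1] >> '/' >>
            qi::double_[ph::ref(den) = qi::_1]);
        if (ok) std::cout << num << " over " << den << std::endl;
        return ok ? 0 : 1;
    }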

View File

@@ -21,6 +21,7 @@
#pragma once
#include <cmath>
#include <boost/serialization/nvp.hpp>
#include <gtsam/base/Matrix.h>
@@ -150,7 +151,7 @@ namespace gtsam {
/** distance between two points */
double dist(const Point3& p2) const {
return sqrt(pow(x()-p2.x(),2.0) + pow(y()-p2.y(),2.0) + pow(z()-p2.z(),2.0));
return std::sqrt(pow(x()-p2.x(),2.0) + pow(y()-p2.y(),2.0) + pow(z()-p2.z(),2.0));
}
/** Distance of the point from the origin */

View File

@@ -53,7 +53,7 @@ namespace gtsam {
template<class CONDITIONAL, class CLIQUE>
void BayesTree<CONDITIONAL,CLIQUE>::getCliqueData(CliqueData& data,
BayesTree<CONDITIONAL,CLIQUE>::sharedClique clique) const {
typename BayesTree<CONDITIONAL,CLIQUE>::sharedClique clique) const {
data.conditionalSizes.push_back((*clique)->nrFrontals());
data.separatorSizes.push_back((*clique)->nrParents());
BOOST_FOREACH(sharedClique c, clique->children_) {
@@ -74,7 +74,7 @@ namespace gtsam {
template<class CONDITIONAL, class CLIQUE>
void BayesTree<CONDITIONAL,CLIQUE>::saveGraph(ostream &s,
BayesTree<CONDITIONAL,CLIQUE>::sharedClique clique,
typename BayesTree<CONDITIONAL,CLIQUE>::sharedClique clique,
int parentnum) const {
static int num = 0;
bool first = true;
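
The BayesTree edits add typename in front of the dependent parameter types of these out-of-line definitions: inside a template, a qualified name that depends on a template parameter, such as BayesTree<CONDITIONAL,CLIQUE>::sharedClique, is not assumed to name a type unless typename says so. The ClusterTree hunk just below sidesteps the same issue the other way, by shortening the parameter to the unqualified Cluster, which names the enclosing class's own member. A minimal illustration of the rule (not from the commit):

    #include <vector>

    // Container::value_type depends on the template parameter, so the
    // compiler cannot treat it as a type without the typename keyword.
    template <class Container>
    typename Container::value_type firstElement(const Container& c) {
        return *c.begin();
    }

    int demo() {
        std::vector<int> v(3, 7);
        return firstElement(v);    // Container deduced as std::vector<int>
    }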

View File

@@ -58,7 +58,7 @@ namespace gtsam {
/* ************************************************************************* */
template<class FG>
bool ClusterTree<FG>::Cluster::equals(const ClusterTree<FG>::Cluster& other) const {
bool ClusterTree<FG>::Cluster::equals(const Cluster& other) const {
if (frontal != other.frontal) return false;
if (separator != other.separator) return false;
if (children_.size() != other.children_.size()) return false;

View File

@@ -49,15 +49,14 @@ namespace gtsam {
}
/* ************************************************************************* */
template<class F, class JT>
typename BayesTree<typename F::ConditionalType>::shared_ptr GenericMultifrontalSolver<F, JT>::eliminate(
typename FactorGraph<F>::Eliminate function) const {
template<class FACTOR, class JUNCTIONTREE>
typename BayesTree<typename FACTOR::ConditionalType>::shared_ptr GenericMultifrontalSolver<FACTOR, JUNCTIONTREE>::eliminate(Eliminate function) const {
// eliminate junction tree, returns pointer to root
typename BayesTree<typename F::ConditionalType>::sharedClique root = junctionTree_->eliminate(function);
typename BayesTree<typename FACTOR::ConditionalType>::sharedClique root = junctionTree_->eliminate(function);
// create an empty Bayes tree and insert root clique
typename BayesTree<typename F::ConditionalType>::shared_ptr bayesTree(new BayesTree<typename F::ConditionalType>);
typename BayesTree<typename FACTOR::ConditionalType>::shared_ptr bayesTree(new BayesTree<typename FACTOR::ConditionalType>);
bayesTree->insert(root);
// return the Bayes tree

View File

@@ -73,7 +73,7 @@ GaussianConditional::GaussianConditional(Index key, const Vector& d, const Matri
const Matrix& R, const list<pair<Index, Matrix> >& parents, const Vector& sigmas) :
IndexConditional(key, GetKeys(parents.size(), parents.begin(), parents.end())), rsd_(matrix_), sigmas_(sigmas) {
assert(R.rows() <= R.cols());
size_t dims[1+parents.size()+1];
size_t* dims = (size_t*)alloca(sizeof(size_t)*(1+parents.size()+1)); // FIXME: alloca is bad, just ask Google.
dims[0] = R.cols();
size_t j=1;
std::list<std::pair<Index, Matrix> >::const_iterator parent=parents.begin();
@@ -95,7 +95,7 @@ GaussianConditional::GaussianConditional(const std::list<std::pair<Index, Matrix
const size_t nrFrontals, const Vector& d, const Vector& sigmas) :
IndexConditional(GetKeys(terms.size(), terms.begin(), terms.end()), nrFrontals),
rsd_(matrix_), sigmas_(sigmas) {
size_t dims[terms.size()+1];
size_t* dims = (size_t*)alloca(sizeof(size_t)*(terms.size()+1)); // FIXME: alloca is bad, just ask Google.
size_t j=0;
typedef pair<Index, Matrix> Index_Matrix;
BOOST_FOREACH(const Index_Matrix& term, terms) {
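
A declaration like size_t dims[1+parents.size()+1] is a variable-length array: its size is only known at run time, which GCC accepts as an extension but MSVC rejects. The commit replaces these with alloca so the buffer stays on the stack, and marks every site with a FIXME; the same substitution appears again in the HessianFactor hunks and the factor-constructor hunks after them. A hedged sketch of the more conventional portable fix, a std::vector rather than alloca (not what the commit does, only the usual alternative):

    #include <cstddef>
    #include <vector>

    // Builds a block-dimension table of runtime length without a VLA or
    // alloca; the buffer lives on the heap and cleans itself up.
    std::vector<std::size_t> makeDims(std::size_t nParents, std::size_t frontalDim) {
        std::vector<std::size_t> dims(1 + nParents + 1);
        dims[0] = frontalDim;                  // frontal block first
        for (std::size_t j = 1; j <= nParents; ++j)
            dims[j] = 3;                       // illustrative parent block widths
        dims[1 + nParents] = 1;                // trailing right-hand-side column
        return dims;                           // &dims[0] gives a size_t* where needed
    }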

View File

@@ -188,7 +188,7 @@ HessianFactor::HessianFactor(const std::vector<Index>& js, const std::vector<Mat
}
// Create the dims vector
size_t dims[variable_count+1];
size_t* dims = (size_t*)alloca(sizeof(size_t)*(variable_count+1)); // FIXME: alloca is bad, just ask Google.
size_t total_size = 0;
for(unsigned int i = 0; i < variable_count; ++i){
dims[i] = gs[i].size();
@@ -345,7 +345,7 @@ void HessianFactor::updateATA(const HessianFactor& update, const Scatter& scatte
// First build an array of slots
tic(1, "slots");
size_t slots[update.size()];
size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
size_t slot = 0;
BOOST_FOREACH(Index j, update) {
slots[slot] = scatter.find(j)->second.slot;
@@ -399,7 +399,7 @@ void HessianFactor::updateATA(const JacobianFactor& update, const Scatter& scatt
// First build an array of slots
tic(1, "slots");
size_t slots[update.size()];
size_t* slots = (size_t*)alloca(sizeof(size_t)*update.size()); // FIXME: alloca is bad, just ask Google.
size_t slot = 0;
BOOST_FOREACH(Index j, update) {
slots[slot] = scatter.find(j)->second.slot;

View File

@@ -118,7 +118,7 @@ namespace gtsam {
GaussianFactor(GetKeys(terms.size(), terms.begin(), terms.end())),
model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_)
{
size_t dims[terms.size()+1];
size_t* dims = (size_t*)alloca(sizeof(size_t)*(terms.size()+1)); // FIXME: alloca is bad, just ask Google.
for(size_t j=0; j<terms.size(); ++j)
dims[j] = terms[j].second.cols();
dims[terms.size()] = 1;
@@ -135,7 +135,7 @@ namespace gtsam {
GaussianFactor(GetKeys(terms.size(), terms.begin(), terms.end())),
model_(model), firstNonzeroBlocks_(b.size(), 0), Ab_(matrix_)
{
size_t dims[terms.size()+1];
size_t* dims=(size_t*)alloca(sizeof(size_t)*(terms.size()+1)); // FIXME: alloca is bad, just ask Google.
size_t j=0;
std::list<std::pair<Index, Matrix> >::const_iterator term=terms.begin();
for(; term!=terms.end(); ++term,++j)
@@ -488,7 +488,7 @@ namespace gtsam {
size_t>& varDims, size_t m) {
keys_.resize(variableSlots.size());
std::transform(variableSlots.begin(), variableSlots.end(), begin(),
bind(&VariableSlots::const_iterator::value_type::first,
boost::bind(&VariableSlots::const_iterator::value_type::first,
boost::lambda::_1));
varDims.push_back(1);
Ab_.copyStructureFrom(BlockAb(matrix_, varDims.begin(), varDims.end(), m));

View File

@@ -20,6 +20,7 @@
#include <boost/serialization/nvp.hpp>
#include <gtsam/base/Matrix.h>
#include <cmath>
namespace gtsam {
@@ -609,7 +610,7 @@ namespace gtsam {
virtual bool equals(const Base& expected, const double tol=1e-8) const = 0;
inline double sqrtWeight(const double &error) const
{ return sqrt(weight(error)); }
{ return std::sqrt(weight(error)); }
/** produce a weight vector according to an error vector and the implemented
* robust function */

View File

@@ -15,6 +15,7 @@
* @author Richard Roberts
*/
#include <cmath>
#include <gtsam/nonlinear/DoglegOptimizerImpl.h>
namespace gtsam {
@@ -27,12 +28,12 @@ VectorValues DoglegOptimizerImpl::ComputeDoglegPoint(
double DeltaSq = Delta*Delta;
double x_u_norm_sq = dx_u.vector().squaredNorm();
double x_n_norm_sq = dx_n.vector().squaredNorm();
if(verbose) cout << "Steepest descent magnitude " << sqrt(x_u_norm_sq) << ", Newton's method magnitude " << sqrt(x_n_norm_sq) << endl;
if(verbose) cout << "Steepest descent magnitude " << std::sqrt(x_u_norm_sq) << ", Newton's method magnitude " << std::sqrt(x_n_norm_sq) << endl;
if(DeltaSq < x_u_norm_sq) {
// Trust region is smaller than steepest descent update
VectorValues x_d = VectorValues::SameStructure(dx_u);
x_d.vector() = dx_u.vector() * sqrt(DeltaSq / x_u_norm_sq);
if(verbose) cout << "In steepest descent region with fraction " << sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
x_d.vector() = dx_u.vector() * std::sqrt(DeltaSq / x_u_norm_sq);
if(verbose) cout << "In steepest descent region with fraction " << std::sqrt(DeltaSq / x_u_norm_sq) << " of steepest descent magnitude" << endl;
return x_d;
} else if(DeltaSq < x_n_norm_sq) {
// Trust region boundary is between steepest descent point and Newton's method point
@@ -59,7 +60,7 @@ VectorValues DoglegOptimizerImpl::ComputeBlend(double Delta, const VectorValues&
const double a = uu - 2.*un + nn;
const double b = 2. * (un - uu);
const double c = uu - Delta*Delta;
double sqrt_b_m4ac = sqrt(b*b - 4*a*c);
double sqrt_b_m4ac = std::sqrt(b*b - 4*a*c);
// Compute blending parameter
double tau1 = (-b + sqrt_b_m4ac) / (2.*a);

View File

@@ -15,6 +15,7 @@
* @author Michael Kaess, Richard Roberts
*/
#include <functional>
#include <boost/range/adaptors.hpp>
#include <boost/range/algorithm.hpp>
#include <gtsam/nonlinear/ISAM2-impl.h>
@@ -149,7 +150,7 @@ void ISAM2::Impl::ExpmapMasked(Values& values, const Permuted<VectorValues>& del
cout << " " << keyFormatter(key_value->key) << " (j = " << var << "), delta = " << delta[var].transpose() << endl;
}
assert(delta[var].size() == (int)key_value->value.dim());
assert(delta[var].unaryExpr(&isfinite<double>).all());
assert(delta[var].unaryExpr(ptr_fun(isfinite<double>)).all());
if(mask[var]) {
Value* retracted = key_value->value.retract_(delta[var]);
key_value->value = *retracted;
@@ -305,7 +306,7 @@ size_t ISAM2::Impl::UpdateDelta(const boost::shared_ptr<ISAM2Clique>& root, std:
#ifndef NDEBUG
for(size_t j=0; j<delta.container().size(); ++j)
assert(delta.container()[j].unaryExpr(&isfinite<double>).all());
assert(delta.container()[j].unaryExpr(ptr_fun(isfinite<double>)).all());
#endif
}
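
Both asserts above stop passing &isfinite<double> straight to Eigen's unaryExpr() and wrap it in ptr_fun instead, which is presumably why <functional> shows up in the include hunk at the top of this file. std::ptr_fun turns the bare function pointer into an adaptable functor object (with argument_type/result_type typedefs), a class type that some compilers and Eigen's functor traits handle more gracefully than a raw pointer to a template specialization. A rough sketch of the pattern, with a stand-in predicate in place of the isfinite the ISAM2 code uses:

    #include <functional>
    #include <Eigen/Core>   // assumption: Eigen is on the include path, as with
                            // GTSAM's bundled copy under gtsam/3rdparty/Eigen

    // Stand-in predicate purely for illustration.
    bool isReasonable(double x) { return x > -1e12 && x < 1e12; }

    bool allReasonable(const Eigen::VectorXd& v) {
        // std::ptr_fun wraps the function pointer in an adaptable functor,
        // handing unaryExpr() a class type instead of a bare pointer.
        return v.unaryExpr(std::ptr_fun(isReasonable)).all();
    }

std::ptr_fun is the pre-C++11 adaptor; in current C++ a lambda would take its place.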

View File

@@ -16,6 +16,8 @@
* @created Feb 26, 2012
*/
#include <cmath>
#include <gtsam/nonlinear/LevenbergMarquardtOptimizer.h>
#include <gtsam/base/cholesky.h> // For NegativeMatrixException
@@ -48,7 +50,7 @@ void LevenbergMarquardtOptimizer::iterate() {
// TODO: replace this dampening with a backsubstitution approach
GaussianFactorGraph dampedSystem(*linear);
{
double sigma = 1.0 / sqrt(state_.lambda);
double sigma = 1.0 / std::sqrt(state_.lambda);
dampedSystem.reserve(dampedSystem.size() + dimensions_.size());
// for each of the variables, add a prior
for(Index j=0; j<dimensions_.size(); ++j) {

View File

@@ -27,7 +27,7 @@ namespace pose2SLAM {
Values x;
double theta = 0, dtheta = 2 * M_PI / n;
for (size_t i = 0; i < n; i++, theta += dtheta)
x.insert(PoseKey(i), Pose2(cos(theta), sin(theta), M_PI_2 + theta));
x.insert(PoseKey(i), Pose2(cos(theta), sin(theta), (M_PI/2.0) + theta));
return x;
}
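
The final change spells M_PI_2 as (M_PI/2.0). M_PI_2 (the constant pi/2) is a POSIX math.h extension rather than part of standard <cmath>, and MSVC only provides it when _USE_MATH_DEFINES is defined before the header is included, so deriving the value from M_PI, which the surrounding loop already uses, removes one more portability snag. A small sketch of the usual guard pattern (an assumption for illustration, not something this commit adds):

    #define _USE_MATH_DEFINES   // ask MSVC's <cmath> for M_PI and friends
    #include <cmath>

    #ifndef M_PI                // fall back if the macro is still absent
    #define M_PI 3.14159265358979323846
    #endif

    double poseTheta(int i, int n) {
        double dtheta = 2.0 * M_PI / n;
        return (M_PI / 2.0) + i * dtheta;   // same expression the commit adopts
    }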