No preloading of tf in the assets writer. (#524)
This follows the logic used in the offline node and keeps only a small buffer of tf data, which is necessary for performance. Fixes #349.
parent a2c8a8fe2d
commit e79754bf71
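Instead of preloading every transform, the change adopts the offline node's approach: tf messages are fed into a small buffer as the bag is read, while all other messages are held back for one second of bag time before being processed, so tf lookups for them can still interpolate. Below is a minimal standalone sketch of that delay-queue pattern, not the Cartographer code itself; the names (Message, kDelaySeconds) and the sample data are made up for illustration.

// Minimal sketch of the delay-queue pattern introduced by the diff below.
// Plain C++ only; 'Message', 'kDelaySeconds' and the sample bag are
// illustrative and not taken from the Cartographer sources.
#include <deque>
#include <iostream>
#include <string>
#include <vector>

struct Message {
  double time;        // bag time in seconds
  bool is_transform;  // true for tf-like messages
  std::string payload;
};

int main() {
  const double kDelaySeconds = 1.0;  // counterpart of kDelay(1.) in the diff
  const std::vector<Message> bag = {
      {0.0, true, "tf"},      {0.2, false, "scan A"}, {0.5, true, "tf"},
      {0.9, false, "scan B"}, {1.5, true, "tf"},      {2.4, false, "scan C"},
      {3.0, true, "tf"}};

  std::deque<Message> delayed_messages;
  for (const Message& message : bag) {
    if (message.is_transform) {
      // The real code calls tf_buffer.setTransform(...) here; the buffer
      // stays small because only ~kDelaySeconds of data is ever pending.
      std::cout << "tf update at " << message.time << "s\n";
    }
    // Drain queued data messages that are at least kDelaySeconds behind the
    // reader; by then all tf needed to interpolate them has been read.
    while (!delayed_messages.empty() &&
           delayed_messages.front().time < message.time - kDelaySeconds) {
      std::cout << "processing '" << delayed_messages.front().payload
                << "' stamped " << delayed_messages.front().time << "s\n";
      delayed_messages.pop_front();
    }
    if (!message.is_transform) {
      delayed_messages.push_back(message);
    }
  }
  // A complete version would also drain whatever is still queued here.
  return 0;
}

The actual change queues every message and lets the sensor-type dispatch skip tf entries; the sketch queues only data messages to keep the output short.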
@@ -70,38 +70,6 @@ namespace {
 constexpr char kTfStaticTopic[] = "/tf_static";
 namespace carto = ::cartographer;
 
-// TODO(hrapp): We discovered that using tf_buffer with a large CACHE
-// is very inefficient. Switch asset writer to use our own
-// TransformInterpolationBuffer.
-void ReadTransformsFromBag(const string& bag_filename,
-                           tf2_ros::Buffer* const tf_buffer) {
-  rosbag::Bag bag;
-  bag.open(bag_filename, rosbag::bagmode::Read);
-  rosbag::View view(bag);
-
-  const ::ros::Time begin_time = view.getBeginTime();
-  const double duration_in_seconds = (view.getEndTime() - begin_time).toSec();
-  for (const rosbag::MessageInstance& msg : view) {
-    if (msg.isType<tf2_msgs::TFMessage>()) {
-      const auto tf_msg = msg.instantiate<tf2_msgs::TFMessage>();
-      for (const auto& transform : tf_msg->transforms) {
-        try {
-          // TODO(damonkohler): Handle topic remapping.
-          tf_buffer->setTransform(transform, "unused_authority",
-                                  msg.getTopic() == kTfStaticTopic);
-        } catch (const tf2::TransformException& ex) {
-          LOG(WARNING) << ex.what();
-        }
-      }
-    }
-    LOG_EVERY_N(INFO, 100000)
-        << "Processed " << (msg.getTime() - begin_time).toSec() << " of "
-        << duration_in_seconds << " bag time seconds...";
-  }
-
-  bag.close();
-}
-
 template <typename T>
 std::unique_ptr<carto::io::PointsBatch> HandleMessage(
     const T& message, const string& tracking_frame,
@@ -186,12 +154,7 @@ void Run(const string& pose_graph_filename,
     if (trajectory_proto.node_size() == 0) {
       continue;
     }
-    tf2_ros::Buffer tf_buffer(::ros::DURATION_MAX);
-    if (FLAGS_use_bag_transforms) {
-      LOG(INFO) << "Pre-loading transforms from bag...";
-      ReadTransformsFromBag(bag_filename, &tf_buffer);
-    }
-
+    tf2_ros::Buffer tf_buffer;
     if (!urdf_filename.empty()) {
       ReadStaticTransformsFromUrdf(urdf_filename, &tf_buffer);
     }
@@ -205,25 +168,55 @@ void Run(const string& pose_graph_filename,
     const double duration_in_seconds =
        (view.getEndTime() - begin_time).toSec();
 
+    // We need to keep 'tf_buffer' small because it becomes very inefficient
+    // otherwise. We make sure that tf_messages are published before any data
+    // messages, so that tf lookups always work.
+    std::deque<rosbag::MessageInstance> delayed_messages;
+    // We publish tf messages one second earlier than other messages. Under
+    // the assumption of higher frequency tf this should ensure that tf can
+    // always interpolate.
+    const ::ros::Duration kDelay(1.);
     for (const rosbag::MessageInstance& message : view) {
-      std::unique_ptr<carto::io::PointsBatch> points_batch;
-      if (message.isType<sensor_msgs::PointCloud2>()) {
-        points_batch = HandleMessage(
-            *message.instantiate<sensor_msgs::PointCloud2>(), tracking_frame,
-            tf_buffer, transform_interpolation_buffer);
-      } else if (message.isType<sensor_msgs::MultiEchoLaserScan>()) {
-        points_batch = HandleMessage(
-            *message.instantiate<sensor_msgs::MultiEchoLaserScan>(),
-            tracking_frame, tf_buffer, transform_interpolation_buffer);
-      } else if (message.isType<sensor_msgs::LaserScan>()) {
-        points_batch = HandleMessage(
-            *message.instantiate<sensor_msgs::LaserScan>(), tracking_frame,
-            tf_buffer, transform_interpolation_buffer);
-      }
-      if (points_batch != nullptr) {
-        points_batch->trajectory_id = trajectory_id;
-        pipeline.back()->Process(std::move(points_batch));
-      }
+      if (FLAGS_use_bag_transforms && message.isType<tf2_msgs::TFMessage>()) {
+        auto tf_message = message.instantiate<tf2_msgs::TFMessage>();
+        for (const auto& transform : tf_message->transforms) {
+          try {
+            tf_buffer.setTransform(transform, "unused_authority",
+                                   message.getTopic() == kTfStaticTopic);
+          } catch (const tf2::TransformException& ex) {
+            LOG(WARNING) << ex.what();
+          }
+        }
+      }
+
+      while (!delayed_messages.empty() &&
+             delayed_messages.front().getTime() <
+                 message.getTime() - kDelay) {
+        const rosbag::MessageInstance& delayed_message =
+            delayed_messages.front();
+
+        std::unique_ptr<carto::io::PointsBatch> points_batch;
+        if (delayed_message.isType<sensor_msgs::PointCloud2>()) {
+          points_batch = HandleMessage(
+              *delayed_message.instantiate<sensor_msgs::PointCloud2>(),
+              tracking_frame, tf_buffer, transform_interpolation_buffer);
+        } else if (delayed_message
+                       .isType<sensor_msgs::MultiEchoLaserScan>()) {
+          points_batch = HandleMessage(
+              *delayed_message.instantiate<sensor_msgs::MultiEchoLaserScan>(),
+              tracking_frame, tf_buffer, transform_interpolation_buffer);
+        } else if (delayed_message.isType<sensor_msgs::LaserScan>()) {
+          points_batch = HandleMessage(
+              *delayed_message.instantiate<sensor_msgs::LaserScan>(),
+              tracking_frame, tf_buffer, transform_interpolation_buffer);
+        }
+        if (points_batch != nullptr) {
+          points_batch->trajectory_id = trajectory_id;
+          pipeline.back()->Process(std::move(points_batch));
+        }
+        delayed_messages.pop_front();
+      }
+      delayed_messages.push_back(message);
       LOG_EVERY_N(INFO, 100000)
           << "Processed " << (message.getTime() - begin_time).toSec()
           << " of " << duration_in_seconds << " bag time seconds...";
@@ -141,10 +141,14 @@ void Run(const std::vector<string>& bag_filenames) {
     const ::ros::Time begin_time = view.getBeginTime();
     const double duration_in_seconds = (view.getEndTime() - begin_time).toSec();
 
-    // We make sure that tf_messages are published before any data messages, so
-    // that tf lookups always work and that tf_buffer has a small cache size -
-    // because it gets very inefficient with a large one.
+    // We need to keep 'tf_buffer' small because it becomes very inefficient
+    // otherwise. We make sure that tf_messages are published before any data
+    // messages, so that tf lookups always work.
     std::deque<rosbag::MessageInstance> delayed_messages;
+    // We publish tf messages one second earlier than other messages. Under
+    // the assumption of higher frequency tf this should ensure that tf can
+    // always interpolate.
+    const ::ros::Duration kDelay(1.);
     for (const rosbag::MessageInstance& msg : view) {
       if (!::ros::ok()) {
         break;
@@ -166,7 +170,7 @@ void Run(const std::vector<string>& bag_filenames) {
 
       while (!delayed_messages.empty() &&
              delayed_messages.front().getTime() <
-                 msg.getTime() - ::ros::Duration(1.)) {
+                 msg.getTime() - kDelay) {
        const rosbag::MessageInstance& delayed_msg = delayed_messages.front();
        const string topic = node.node_handle()->resolveName(
            delayed_msg.getTopic(), false /* resolve */);