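// WorldCallback runs once per Stage world update: it stamps everything with
// the simulated time, enforces the velocity-command watchdog, and publishes
// each robot's laser scans, odometry, ground truth, camera color/depth images
// and camera info, the associated tf frames, and the simulated /clock.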
void
StageNode::WorldCallback()
{
    boost::mutex::scoped_lock lock(msg_lock);

    this->sim_time.fromSec(world->SimTimeNow() / 1e6);
    // We're not allowed to publish clock==0, because it is used as a special
    // value in parts of ROS, #4027.
    if (this->sim_time.sec == 0 && this->sim_time.nsec == 0)
    {
        ROS_DEBUG("Skipping initial simulation step, to avoid publishing clock==0");
        return;
    }

    // TODO make this only affect one robot if necessary
    if ((this->base_watchdog_timeout.toSec() > 0.0) &&
        ((this->sim_time - this->base_last_cmd) >= this->base_watchdog_timeout))
    {
        for (size_t r = 0; r < this->positionmodels.size(); r++)
            this->positionmodels[r]->SetSpeed(0.0, 0.0, 0.0);
    }

    // Loop over the robot models
    for (size_t r = 0; r < this->robotmodels_.size(); ++r)
    {
        StageRobot const* robotmodel = this->robotmodels_[r];

        // Loop over the laser devices for the current robot
        for (size_t s = 0; s < robotmodel->lasermodels.size(); ++s)
        {
            Stg::ModelRanger const* lasermodel = robotmodel->lasermodels[s];
            const std::vector<Stg::ModelRanger::Sensor>& sensors = lasermodel->GetSensors();

            if (sensors.size() > 1)
                ROS_WARN("ROS Stage currently supports rangers with 1 sensor only.");

            // For now we access only the zeroth sensor of the ranger - good
            // enough for most laser models that have a single beam origin
            const Stg::ModelRanger::Sensor& sensor = sensors[0];

            if (sensor.ranges.size())
            {
                // Translate into ROS message format and publish
                sensor_msgs::LaserScan msg;
                msg.angle_min = -sensor.fov / 2.0;
                msg.angle_max = +sensor.fov / 2.0;
                msg.angle_increment = sensor.fov / (double)(sensor.sample_count - 1);
                msg.range_min = sensor.range.min;
                msg.range_max = sensor.range.max;
                msg.ranges.resize(sensor.ranges.size());
                msg.intensities.resize(sensor.intensities.size());

                for (unsigned int i = 0; i < sensor.ranges.size(); i++)
                {
                    msg.ranges[i] = sensor.ranges[i];
                    msg.intensities[i] = (uint8_t)sensor.intensities[i];
                }

                if (robotmodel->lasermodels.size() > 1)
                    msg.header.frame_id = mapName(this->laser_frame.c_str(), r, s, static_cast<Stg::Model*>(robotmodel->positionmodel));
                else
                    msg.header.frame_id = mapName(this->laser_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel));

                msg.header.stamp = sim_time;
                robotmodel->laser_pubs[s].publish(msg);
            }
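            // Note on the conversion above: the increment computation treats
            // Stage's sample_count rays as spanning the full fov inclusive of
            // both end rays, hence sample_count-1 angular steps between
            // angle_min and angle_max. Stage's floating-point intensities are
            // truncated to 8-bit values before being copied into the scan.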
            // Also publish the base->base_laser_link Tx. This could eventually
            // move into being retrieved from the param server as a static Tx.
            Stg::Pose lp = lasermodel->GetPose();
            tf::Quaternion laserQ;
            laserQ.setRPY(0.0, 0.0, lp.a);
            tf::Transform txLaser = tf::Transform(laserQ,
                                                  tf::Point(lp.x, lp.y,
                                                            robotmodel->positionmodel->GetGeom().size.z + lp.z));

            if (robotmodel->lasermodels.size() > 1)
                tf.sendTransform(tf::StampedTransform(txLaser, sim_time,
                                                      mapName(this->base_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                                      mapName(this->laser_frame.c_str(), r, s, static_cast<Stg::Model*>(robotmodel->positionmodel))));
            else
                tf.sendTransform(tf::StampedTransform(txLaser, sim_time,
                                                      mapName(this->base_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                                      mapName(this->laser_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel))));
        }

        // The position of the robot: base_footprint -> base_link is the identity
        tf.sendTransform(tf::StampedTransform(tf::Transform::getIdentity(), sim_time,
                                              mapName(this->base_footprint_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                              mapName(this->base_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel))));

        // Get latest odometry data
        // Translate into ROS message format and publish
        nav_msgs::Odometry odom_msg;
        odom_msg.pose.pose.position.x = robotmodel->positionmodel->est_pose.x;
        odom_msg.pose.pose.position.y = robotmodel->positionmodel->est_pose.y;
        odom_msg.pose.pose.orientation = tf::createQuaternionMsgFromYaw(robotmodel->positionmodel->est_pose.a);
        Stg::Velocity v = robotmodel->positionmodel->GetVelocity();
        odom_msg.twist.twist.linear.x = v.x;
        odom_msg.twist.twist.linear.y = v.y;
        odom_msg.twist.twist.angular.z = v.a;

        //@todo Publish stall on a separate topic when one becomes available
        //odom_msg.stall = robotmodel->positionmodel->Stall();
        odom_msg.header.frame_id = mapName(this->odom_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel));
        odom_msg.header.stamp = sim_time;

        robotmodel->odom_pub.publish(odom_msg);

        // Broadcast odometry transform
        tf::Quaternion odomQ;
        tf::quaternionMsgToTF(odom_msg.pose.pose.orientation, odomQ);
        tf::Transform txOdom(odomQ, tf::Point(odom_msg.pose.pose.position.x,
                                              odom_msg.pose.pose.position.y, 0.0));
        tf.sendTransform(tf::StampedTransform(txOdom, sim_time,
                                              mapName(this->odom_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                              mapName(this->base_footprint_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel))));

        // Also publish the ground truth pose and velocity
        Stg::Pose gpose = robotmodel->positionmodel->GetGlobalPose();
        tf::Quaternion q_gpose;
        q_gpose.setRPY(0.0, 0.0, gpose.a);
        tf::Transform gt(q_gpose, tf::Point(gpose.x, gpose.y, 0.0));

        // Velocity is 0 by default and will be set only if there is a
        // previous pose and the time delta is > 0
        Stg::Velocity gvel(0, 0, 0, 0);
        if (this->base_last_globalpos.size() > r)
        {
            Stg::Pose prevpose = this->base_last_globalpos.at(r);
            double dT = (this->sim_time - this->base_last_globalpos_time).toSec();
            if (dT > 0)
                gvel = Stg::Velocity(
                        (gpose.x - prevpose.x) / dT,
                        (gpose.y - prevpose.y) / dT,
                        (gpose.z - prevpose.z) / dT,
                        Stg::normalize(gpose.a - prevpose.a) / dT);
            this->base_last_globalpos.at(r) = gpose;
        }
        else
        {
            // There are no previous readings, so store the current pose
            this->base_last_globalpos.push_back(gpose);
        }
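        // The ground-truth twist published below is estimated by finite
        // differences between consecutive global poses, (gpose - prevpose)/dT,
        // with the heading difference angle-normalized first so a wrap across
        // +/-pi does not produce a spurious angular-velocity spike. On the very
        // first update there is no previous pose, so the twist stays zero.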
        nav_msgs::Odometry ground_truth_msg;
        ground_truth_msg.pose.pose.position.x = gt.getOrigin().x();
        ground_truth_msg.pose.pose.position.y = gt.getOrigin().y();
        ground_truth_msg.pose.pose.position.z = gt.getOrigin().z();
        ground_truth_msg.pose.pose.orientation.x = gt.getRotation().x();
        ground_truth_msg.pose.pose.orientation.y = gt.getRotation().y();
        ground_truth_msg.pose.pose.orientation.z = gt.getRotation().z();
        ground_truth_msg.pose.pose.orientation.w = gt.getRotation().w();
        ground_truth_msg.twist.twist.linear.x = gvel.x;
        ground_truth_msg.twist.twist.linear.y = gvel.y;
        ground_truth_msg.twist.twist.linear.z = gvel.z;
        ground_truth_msg.twist.twist.angular.z = gvel.a;

        ground_truth_msg.header.frame_id = mapName(this->odom_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel));
        ground_truth_msg.header.stamp = sim_time;

        robotmodel->ground_truth_pub.publish(ground_truth_msg);

        // Cameras
        for (size_t s = 0; s < robotmodel->cameramodels.size(); ++s)
        {
            Stg::ModelCamera* cameramodel = robotmodel->cameramodels[s];

            // Get latest image data
            // Translate into ROS message format and publish
            // Skip if there are no subscribers
            if (robotmodel->image_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameColor())
            {
                sensor_msgs::Image image_msg;
                image_msg.height = cameramodel->getHeight();
                image_msg.width = cameramodel->getWidth();
                image_msg.encoding = "rgba8";
                image_msg.step = image_msg.width * 4;
                image_msg.data.resize(image_msg.width * image_msg.height * 4);
                memcpy(&(image_msg.data[0]), cameramodel->FrameColor(),
                       image_msg.width * image_msg.height * 4);

                // Invert the OpenGL weirdness: the frame is rendered bottom-up,
                // so swap rows to make it top-down
                int height = image_msg.height - 1;
                int linewidth = image_msg.width * 4;
                char* temp = new char[linewidth];
                for (int y = 0; y < (height + 1) / 2; y++)
                {
                    memcpy(temp, &image_msg.data[y * linewidth], linewidth);
                    memcpy(&(image_msg.data[y * linewidth]), &(image_msg.data[(height - y) * linewidth]), linewidth);
                    memcpy(&(image_msg.data[(height - y) * linewidth]), temp, linewidth);
                }
                delete[] temp; // free the row-swap buffer

                if (robotmodel->cameramodels.size() > 1)
                    image_msg.header.frame_id = mapName("camera", r, s, static_cast<Stg::Model*>(robotmodel->positionmodel));
                else
                    image_msg.header.frame_id = mapName("camera", r, static_cast<Stg::Model*>(robotmodel->positionmodel));
                image_msg.header.stamp = sim_time;

                robotmodel->image_pubs[s].publish(image_msg);
            }

            // Get latest depth data
            // Translate into ROS message format and publish
            // Skip if there are no subscribers
            if (robotmodel->depth_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameDepth())
            {
                sensor_msgs::Image depth_msg;
                depth_msg.height = cameramodel->getHeight();
                depth_msg.width = cameramodel->getWidth();
                depth_msg.encoding = this->isDepthCanonical ?
                        sensor_msgs::image_encodings::TYPE_32FC1 :
                        sensor_msgs::image_encodings::TYPE_16UC1;
                int sz = this->isDepthCanonical ? sizeof(float) : sizeof(uint16_t);
                size_t len = depth_msg.width * depth_msg.height;
                depth_msg.step = depth_msg.width * sz;
                depth_msg.data.resize(len * sz);

                // Processing data according to REP 118
                if (this->isDepthCanonical)
                {
                    // Canonical representation: 32FC1 depths in meters, with
                    // readings closer than nearClip mapped to -Inf and readings
                    // farther than farClip mapped to +Inf
                    double nearClip = cameramodel->getCamera().nearClip();
                    double farClip = cameramodel->getCamera().farClip();
                    memcpy(&(depth_msg.data[0]), cameramodel->FrameDepth(), len * sz);
                    float* data = (float*)&(depth_msg.data[0]);
                    for (size_t i = 0; i < len; ++i)
                        if (data[i] <= nearClip)
                            data[i] = -INFINITY;
                        else if (data[i] >= farClip)
                            data[i] = INFINITY;
                }
                else
                {
                    int nearClip = (int)(cameramodel->getCamera().nearClip() * 1000);
                    int farClip = (int)(cameramodel->getCamera().farClip() * 1000);
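                    // In the 16UC1 case depths are encoded as integer
                    // millimeters; following REP 118's raw-device convention,
                    // the value 0 is reserved for readings outside the
                    // [nearClip, farClip] interval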
                    for (size_t i = 0; i < len; ++i)
                    {
                        int v = (int)(cameramodel->FrameDepth()[i] * 1000);
                        ((uint16_t*)&(depth_msg.data[0]))[i] =
                                (uint16_t)((v <= nearClip || v >= farClip) ? 0 : v);
                    }
                }

                // Invert the OpenGL weirdness, as for the color image above
                int height = depth_msg.height - 1;
                int linewidth = depth_msg.width * sz;
                char* temp = new char[linewidth];
                for (int y = 0; y < (height + 1) / 2; y++)
                {
                    memcpy(temp, &depth_msg.data[y * linewidth], linewidth);
                    memcpy(&(depth_msg.data[y * linewidth]), &(depth_msg.data[(height - y) * linewidth]), linewidth);
                    memcpy(&(depth_msg.data[(height - y) * linewidth]), temp, linewidth);
                }
                delete[] temp; // free the row-swap buffer

                if (robotmodel->cameramodels.size() > 1)
                    depth_msg.header.frame_id = mapName("camera", r, s, static_cast<Stg::Model*>(robotmodel->positionmodel));
                else
                    depth_msg.header.frame_id = mapName("camera", r, static_cast<Stg::Model*>(robotmodel->positionmodel));
                depth_msg.header.stamp = sim_time;
                robotmodel->depth_pubs[s].publish(depth_msg);
            }

            // Send the camera's tf and info only if the image or depth topics
            // are subscribed to
            if ((robotmodel->image_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameColor())
                || (robotmodel->depth_pubs[s].getNumSubscribers() > 0 && cameramodel->FrameDepth()))
            {
                Stg::Pose lp = cameramodel->GetPose();
                tf::Quaternion Q;
                Q.setRPY(
                        (cameramodel->getCamera().pitch() * M_PI / 180.0) - M_PI,
                        0.0,
                        lp.a + (cameramodel->getCamera().yaw() * M_PI / 180.0) - robotmodel->positionmodel->GetPose().a);

                tf::Transform tr = tf::Transform(Q, tf::Point(lp.x, lp.y,
                                                              robotmodel->positionmodel->GetGeom().size.z + lp.z));

                if (robotmodel->cameramodels.size() > 1)
                    tf.sendTransform(tf::StampedTransform(tr, sim_time,
                                                          mapName(this->base_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                                          mapName("camera", r, s, static_cast<Stg::Model*>(robotmodel->positionmodel))));
                else
                    tf.sendTransform(tf::StampedTransform(tr, sim_time,
                                                          mapName(this->base_frame.c_str(), r, static_cast<Stg::Model*>(robotmodel->positionmodel)),
                                                          mapName("camera", r, static_cast<Stg::Model*>(robotmodel->positionmodel))));

                sensor_msgs::CameraInfo camera_msg;
                if (robotmodel->cameramodels.size() > 1)
                    camera_msg.header.frame_id = mapName("camera", r, s, static_cast<Stg::Model*>(robotmodel->positionmodel));
                else
                    camera_msg.header.frame_id = mapName("camera", r, static_cast<Stg::Model*>(robotmodel->positionmodel));
                camera_msg.header.stamp = sim_time;
                camera_msg.height = cameramodel->getHeight();
                camera_msg.width = cameramodel->getWidth();

                // Pinhole intrinsics from the camera's fields of view: the
                // principal point sits at the image center and the focal
                // lengths follow f = (size/2) / tan(fov/2)
                double fx, fy, cx, cy;
                cx = camera_msg.width / 2.0;
                cy = camera_msg.height / 2.0;
                double fovh = cameramodel->getCamera().horizFov() * M_PI / 180.0;
                double fovv = cameramodel->getCamera().vertFov() * M_PI / 180.0;
                fx = cameramodel->getWidth() / (2 * tan(fovh / 2));
                fy = cameramodel->getHeight() / (2 * tan(fovv / 2));

                camera_msg.D.resize(4, 0.0);

                camera_msg.K[0] = fx;
                camera_msg.K[2] = cx;
                camera_msg.K[4] = fy;
                camera_msg.K[5] = cy;
                camera_msg.K[8] = 1.0;

                camera_msg.R[0] = 1.0;
                camera_msg.R[4] = 1.0;
                camera_msg.R[8] = 1.0;

                camera_msg.P[0] = fx;
                camera_msg.P[2] = cx;
                camera_msg.P[5] = fy;
                camera_msg.P[6] = cy;
                camera_msg.P[10] = 1.0;

                robotmodel->camera_pubs[s].publish(camera_msg);
            }
        }
    }

    this->base_last_globalpos_time = this->sim_time;

    rosgraph_msgs::Clock clock_msg;
    clock_msg.clock = sim_time;
    this->clock_pub_.publish(clock_msg);
}
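// For reference, a minimal sketch of how this callback is typically driven
// (assuming the usual stageros wiring; the actual registration lives elsewhere
// in this file): Stage invokes a static trampoline registered on the world
// once per update, and returning false from it keeps the callback installed.
//
//   static bool s_update(Stg::World* world, StageNode* node)
//   {
//       node->WorldCallback();
//       return false;
//   }
//
//   // e.g. during node construction:
//   this->world->AddUpdateCallback((Stg::world_callback_t)s_update, this);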