Example no. 1
	/// Continuously advertises xyz and grey images.
	bool spin()
	{
		boost::mutex::scoped_lock lock(service_mutex_);
		sensor_msgs::Image::Ptr xyz_image_msg_ptr;
		sensor_msgs::Image::Ptr grey_image_msg_ptr;
		sensor_msgs::CameraInfo tof_image_info;
	
		if(tof_camera_->AcquireImages(0, &grey_image_32F1_, &xyz_image_32F3_, false, false, ipa_CameraSensors::INTENSITY) & ipa_Utils::RET_FAILED)
		{
			ROS_ERROR("[tof_camera] Tof image acquisition failed");
			return false;
		}

		/// Filter images by amplitude and remove tear-off edges
		if(filter_xyz_tearoff_edges_) ipa_Utils::FilterTearOffEdges(xyz_image_32F3_, 0, (float)tearoff_tear_half_fraction_);
		if(filter_xyz_by_amplitude_) ipa_Utils::FilterByAmplitude(xyz_image_32F3_, grey_image_32F1_, 0, 0, lower_amplitude_threshold_, upper_amplitude_threshold_);

		try
		{
			IplImage img = xyz_image_32F3_;
			xyz_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(&img, "passthrough");
		}
		catch (const sensor_msgs::CvBridgeException& error)
		{
			ROS_ERROR("[tof_camera] Could not convert 32bit xyz IplImage to ROS message");
			return false;
		}

		try
		{
			IplImage img = grey_image_32F1_;
			grey_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(&img, "passthrough");
		}
		catch (const sensor_msgs::CvBridgeException& error)
		{
			ROS_ERROR("[tof_camera] Could not convert 32bit grey IplImage to ROS message");
			return false;
		}

		/// Set time stamp
		ros::Time now = ros::Time::now();
		xyz_image_msg_ptr->header.stamp = now;
		grey_image_msg_ptr->header.stamp = now;

		tof_image_info = camera_info_msg_;
		tof_image_info.width = grey_image_32F1_.cols;
		tof_image_info.height = grey_image_32F1_.rows;
		tof_image_info.header.stamp = now;

		/// publish message
		xyz_image_publisher_.publish(*xyz_image_msg_ptr, tof_image_info);
		grey_image_publisher_.publish(*grey_image_msg_ptr, tof_image_info);

		return true;
	}
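
Note: the sensor_msgs::CvBridge API used above has long been deprecated in favor of the cv_bridge package. A minimal sketch of the same cv::Mat-to-message conversion with the newer API might look like the following (the function name and arguments are illustrative, carried over from the example, not the node's original code):

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

/// Sketch: wrap a 3-channel float xyz image in a sensor_msgs::Image.
sensor_msgs::ImagePtr xyzToImageMsg(const cv::Mat& xyz_image_32F3, const ros::Time& stamp)
{
	cv_bridge::CvImage bridge;
	bridge.header.stamp = stamp;
	bridge.encoding = sensor_msgs::image_encodings::TYPE_32FC3; // matches the 32F3 buffer
	bridge.image = xyz_image_32F3;
	return bridge.toImageMsg(); // allocates and fills the message
}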
Example no. 2
  void imageCB(const sensor_msgs::ImageConstPtr& image_l,
	       const sensor_msgs::CameraInfoConstPtr& info_l,
	       const sensor_msgs::ImageConstPtr& image_r,
	       const sensor_msgs::CameraInfoConstPtr& info_r)  {

    sensor_msgs::Image img = *image_r;
    sensor_msgs::CameraInfo info = *info_r;

    //img.header.stamp = image_l->header.stamp;
    //info.header.stamp = info_l->header.stamp;

    pub_left_.publish(image_l, info_l);
    pub_right_.publish(img, info, info_l->header.stamp);
    //pub_right_.publish(*image_r, *info_r, info_l->header.stamp);
  }
void
pubRealSenseInfraredImageMsg(cv::Mat& ir_mat)
{
	sensor_msgs::ImagePtr ir_img(new sensor_msgs::Image);

	ir_img->header.seq = head_sequence_id;
	ir_img->header.stamp = head_time_stamp;
	ir_img->header.frame_id = depth_frame_id;


	ir_img->width = ir_mat.cols;
	ir_img->height = ir_mat.rows;

	ir_img->encoding = sensor_msgs::image_encodings::MONO8;
	ir_img->is_bigendian = 0;

	int step = sizeof(unsigned char) * ir_img->width;
	int size = step * ir_img->height;
	ir_img->step = step;
	ir_img->data.resize(size);
	memcpy(&ir_img->data[0], ir_mat.data, size);

	ir_camera_info->header.frame_id = depth_frame_id;
	ir_camera_info->header.stamp = head_time_stamp;
	ir_camera_info->header.seq = head_sequence_id;

	realsense_infrared_image_pub.publish(ir_img, ir_camera_info);
}
void
pubRealSenseDepthImageMsg32F(cv::Mat& depth_mat)
{
  sensor_msgs::ImagePtr depth_img(new sensor_msgs::Image);

  depth_img->header.seq = head_sequence_id;
  depth_img->header.stamp = head_time_stamp;
  depth_img->header.frame_id = depth_frame_id;

  depth_img->width = depth_mat.cols;
  depth_img->height = depth_mat.rows;

  depth_img->encoding = sensor_msgs::image_encodings::TYPE_32FC1;
  depth_img->is_bigendian = 0;

  int step = sizeof(float) * depth_img->width;
  int size = step * depth_img->height;
  depth_img->step = step;
  depth_img->data.resize(size);
  memcpy(&depth_img->data[0], depth_mat.data, size);

  ir_camera_info->header.frame_id = depth_frame_id;
  ir_camera_info->header.stamp = head_time_stamp;
  ir_camera_info->header.seq = head_sequence_id;


  realsense_depth_image_pub.publish(depth_img, ir_camera_info);
//pub_depth_info.publish(ir_camera_info);
}
Example no. 5
// Handles (un)subscribing when clients (un)subscribe
void CropDecimateNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_.getNumSubscribers() == 0)
    sub_.shutdown();
  else if (!sub_)
    sub_ = it_in_->subscribeCamera("image_raw", queue_size_, &CropDecimateNodelet::imageCb, this);
}
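
For context, a connect callback like this only fires if it was registered when the publisher was advertised. A plausible wiring inside onInit(), reusing the names from the snippet (it_out_ and the output topic name are assumptions here), would be:

// Sketch: advertise the output with connectCb bound to both connect and
// disconnect events; boost::bind's extra-argument discarding makes the
// nullary member function usable as a SubscriberStatusCallback.
void CropDecimateNodelet::onInit()
{
	it_out_.reset(new image_transport::ImageTransport(getNodeHandle()));
	image_transport::SubscriberStatusCallback connect_cb =
		boost::bind(&CropDecimateNodelet::connectCb, this);
	// Take the lock before advertising so connectCb cannot fire mid-setup.
	boost::lock_guard<boost::mutex> lock(connect_mutex_);
	pub_ = it_out_->advertiseCamera("image_out", 1, connect_cb, connect_cb);
}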
Example no. 6
  virtual ~SensorStreamManager()
  {
    stream_.removeNewFrameListener(this);
    stream_.stop();
    stream_.destroy();

    publisher_.shutdown();
  }
int getNumDepthSubscribers()
{
    int n = realsense_points_pub.getNumSubscribers() + realsense_reg_points_pub.getNumSubscribers() + realsense_depth_image_pub.getNumSubscribers();
#ifdef V4L2_PIX_FMT_INZI
    n += realsense_infrared_image_pub.getNumSubscribers();
#endif
    return n;
}
  void publishImage (FlyCapture2::Image * frame)
  {
    //sensor_msgs::CameraInfoPtr cam_info(new sensor_msgs::CameraInfo(cam_info_->getCameraInfo()));
    // or with a member variable:
    cam_info_ptr_ = sensor_msgs::CameraInfoPtr(new sensor_msgs::CameraInfo(cam_info_mgr_->getCameraInfo()));

    if (processFrame (frame, img_, cam_info_ptr_))
      streaming_pub_.publish (img_, *cam_info_ptr_);
  }
  void stop ()
  {
    if (!running)
      return;

    cam_->stop ();              // Must stop camera before streaming_pub_.
    poll_srv_.shutdown ();
    streaming_pub_.shutdown ();

    running = false;
  }
Example no. 10
// Handles (un)subscribing when clients (un)subscribe
void CropDecimateNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_.getNumSubscribers() == 0)
    sub_.shutdown();
  else if (!sub_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_ = it_in_->subscribeCamera("image_raw", queue_size_, &CropDecimateNodelet::imageCb, this, hints);
  }
}
Example no. 11
  virtual void onSubscriptionChanged(const image_transport::SingleSubscriberPublisher& topic)
  {
    size_t disparity_clients = disparity_publisher_.getNumSubscribers() + disparity_registered_publisher_.getNumSubscribers();
    size_t depth_clients = publisher_.getNumSubscribers() + depth_registered_publisher_.getNumSubscribers();
    size_t all_clients = disparity_clients + depth_clients;

    if(!running_ && all_clients > 0)
    {
      running_ = (stream_.start() == STATUS_OK);
    }
    else if(running_ && all_clients == 0)
    {
      stream_.stop();
      running_ = false;
    }

    if(running_)
    {
      updateActivePublisher();
    }
  }
Example no. 12
  bool take_and_send_image()
  {
    usb_cam_camera_grab_image(camera_image_);
    fillImage(img_, "rgb8", camera_image_->height, camera_image_->width, 3 * camera_image_->width, camera_image_->image);
    img_.header.stamp = ros::Time::now();

    sensor_msgs::CameraInfoPtr ci(new sensor_msgs::CameraInfo(cinfo_->getCameraInfo()));
    ci->header.frame_id = img_.header.frame_id;
    ci->header.stamp = img_.header.stamp;

    image_pub_.publish(img_, *ci);
    return true;
  }
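
take_and_send_image() is presumably driven by a polling loop at the camera frame rate; a minimal driver sketch (the 30 Hz rate and the nh_ member are assumptions) could be:

  // Sketch: poll the camera at a fixed rate (30 Hz assumed).
  bool spin()
  {
    ros::Rate loop_rate(30);
    while (nh_.ok())
    {
      if (!take_and_send_image())
        ROS_WARN("usb_cam: couldn't take image");
      ros::spinOnce();
      loop_rate.sleep();
    }
    return true;
  }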
Example no. 13
  void OpenNINode::publishRgbImage(const openni_wrapper::Image& rgb_img, image_transport::CameraPublisher img_pub) const {
      sensor_msgs::ImagePtr rgb_msg = boost::make_shared<sensor_msgs::Image>();
      ros::Time time = ros::Time::now();
      rgb_msg->header.stamp    = time;
      rgb_msg->encoding        = enc::RGB8;
      rgb_msg->width           = depth_width_;
      rgb_msg->height          = depth_height_;
      rgb_msg->step            = rgb_msg->width * 3;
      rgb_msg->data.resize(rgb_msg->height * rgb_msg->step);

      rgb_img.fillRGB(rgb_msg->width, rgb_msg->height, reinterpret_cast<unsigned char*>(&rgb_msg->data[0]), rgb_msg->step);

      img_pub.publish(rgb_msg, getDepthCameraInfo(rgb_msg->width, rgb_msg->height, time, 1));
    }
Example no. 14
void publishframe(camera_info_manager::CameraInfoManager *mgr, const image_transport::CameraPublisher &campub, const sensor_msgs::ImagePtr img, const std::string& frame)
{
  ros::Time timestamp = ros::Time::now();
  sensor_msgs::CameraInfo::Ptr cinfo(new sensor_msgs::CameraInfo(mgr->getCameraInfo()));
  std_msgs::Header::Ptr imageheader(new std_msgs::Header());
  std_msgs::Header::Ptr cinfoheader(new std_msgs::Header());
  imageheader->frame_id = frame;
  cinfoheader->frame_id = frame;
  imageheader->stamp = timestamp;
  cinfoheader->stamp = timestamp;
  img->header = *imageheader;
  cinfo->header = *cinfoheader;
  campub.publish(img, cinfo);
}
Example no. 15
  void OpenNINode::publishDepthImage(const openni_wrapper::DepthImage& depth, image_transport::CameraPublisher depth_pub) const {
      sensor_msgs::ImagePtr depth_msg = boost::make_shared<sensor_msgs::Image>();
      ros::Time time = ros::Time::now();
      depth_msg->header.stamp    = time;
      depth_msg->encoding        = sensor_msgs::image_encodings::TYPE_32FC1;
      depth_msg->width           = depth_width_;
      depth_msg->height          = depth_height_;
      depth_msg->step            = depth_msg->width * sizeof(float);
      depth_msg->data.resize(depth_msg->height * depth_msg->step);

      depth.fillDepthImage(depth_msg->width, depth_msg->height, reinterpret_cast<float*>(&depth_msg->data[0]), depth_msg->step);

      depth_pub.publish(depth_msg, getDepthCameraInfo(depth_msg->width, depth_msg->height, time, 1));
      //pub.publish(depth_msg);
    }
Example no. 16
  bool take_and_send_image()
  {
    // grab the image
    cam_.grab_image(&img_);

    // grab the camera info
    sensor_msgs::CameraInfoPtr ci(new sensor_msgs::CameraInfo(cinfo_->getCameraInfo()));
    ci->header.frame_id = img_.header.frame_id;
    ci->header.stamp = img_.header.stamp;

    // publish the image
    image_pub_.publish(img_, *ci);

    return true;
  }
void
pubRealSenseRGBImageMsg(cv::Mat& rgb_mat)
{
	sensor_msgs::ImagePtr rgb_img(new sensor_msgs::Image);

	rgb_img->header.seq = head_sequence_id;
	rgb_img->header.stamp = head_time_stamp;
	rgb_img->header.frame_id = rgb_frame_id;

	rgb_img->width = rgb_mat.cols;
	rgb_img->height = rgb_mat.rows;

	rgb_img->encoding = sensor_msgs::image_encodings::BGR8;
	rgb_img->is_bigendian = 0;

	int step = sizeof(unsigned char) * 3 * rgb_img->width;
	int size = step * rgb_img->height;
	rgb_img->step = step;
	rgb_img->data.resize(size);
	memcpy(&(rgb_img->data[0]), rgb_mat.data, size);

	rgb_camera_info->header.frame_id = rgb_frame_id;
	rgb_camera_info->header.stamp = head_time_stamp;
	rgb_camera_info->header.seq = head_sequence_id;

	realsense_rgb_image_pub.publish(rgb_img, rgb_camera_info);
  //pub_rgb_info.publish(rgb_camera_info);

}
Example no. 18
  /** Publish camera stream topics
   *
   *  @pre image_ contains latest camera frame
   */
  void publish()
  {
    image_.header.frame_id = config_.frame_id;

    // get current CameraInfo data
    cam_info_ = cinfo_->getCameraInfo();

    //ROS_INFO_STREAM ("cam: " << cam_info_.width << " x " << cam_info_.height << ", image: " << image_.width << " x " << image_.height);
    if (cam_info_.height != image_.height || cam_info_.width != image_.width)
      {
        // image size does not match: publish a matching uncalibrated
        // CameraInfo instead
        if (calibration_matches_)
          {
            // warn user once
            calibration_matches_ = false;
            ROS_WARN_STREAM("[" << camera_name_
                                 << "] camera_info_url: " << config_.camera_info_url);
            ROS_WARN_STREAM("[" << camera_name_
                            << "] calibration (" << cam_info_.width << "x" << cam_info_.height << ") does not match video mode (" << image_.width << "x" << image_.height << ") "
                            << "(publishing uncalibrated data)");
          }
        cam_info_ = sensor_msgs::CameraInfo();
        cam_info_.height = image_.height;
        cam_info_.width = image_.width;
      }
    else if (!calibration_matches_)
      {
        // calibration OK now
        calibration_matches_ = true;
        ROS_INFO_STREAM("[" << camera_name_
                        << "] calibration matches video mode now");
      }

    cam_info_.header.frame_id = config_.frame_id;
    cam_info_.header.stamp = image_.header.stamp;

    // @todo log a warning if (filtered) time since last published
    // image is not reasonably close to configured frame_rate

    // Publish via image_transport
    image_pub_.publish(image_, cam_info_);
  }
void constMaskCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if(!image_rect.empty())
    {
        cv::Mat masked;
        image_rect.copyTo(masked, mask);
        cv_bridge::CvImage masked_msg;
        masked_msg.header = msg->header;
        masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

        //if we want rescale then rescale
        if(scale_factor > 1)
        {
            cv::Mat masked_tmp = masked;
            cv::resize(masked_tmp, masked,
                       cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
        }
        masked_msg.image = masked;
        cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
    }

    //    ROS_INFO("disparityCb runtime: %f ms",
    //             1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
}
Example no. 20
	/// Callback function for image requests on topic 'request_image'
	void spin()
	{
		ros::Rate rate(3);
		while(node_handle_.ok())
		{
			// ROS images
			sensor_msgs::Image right_color_image_msg;
			sensor_msgs::Image left_color_image_msg;
			sensor_msgs::Image xyz_tof_image_msg;
			sensor_msgs::Image grey_tof_image_msg;
		
			// ROS camera information messages
			sensor_msgs::CameraInfo right_color_image_info;
			sensor_msgs::CameraInfo left_color_image_info;
			sensor_msgs::CameraInfo tof_image_info;
	
			ros::Time now = ros::Time::now();
	
			// Acquire left color image
			if (left_color_camera_)
			{
				//ROS_INFO("[all_cameras] LEFT");
				/// Release previously acquired IplImage
				if (left_color_image_8U3_)
				{
					cvReleaseImage(&left_color_image_8U3_);
					left_color_image_8U3_ = 0;
				}
		
				/// Acquire new image
				if (left_color_camera_->GetColorImage2(&left_color_image_8U3_, false) & ipa_Utils::RET_FAILED)
				{
					ROS_ERROR("[all_cameras] Left color image acquisition failed");
					break;
				}

				try
		  		{
					left_color_image_msg = *(sensor_msgs::CvBridge::cvToImgMsg(left_color_image_8U3_, "bgr8"));
				}
				catch (const sensor_msgs::CvBridgeException& error)
				{
					ROS_ERROR("[all_cameras] Could not convert left IplImage to ROS message");
					break;
				}
				left_color_image_msg.header.stamp = now;    
		
				left_color_image_info = left_color_camera_info_message_;
				left_color_image_info.width = left_color_image_8U3_->width;
				left_color_image_info.height = left_color_image_8U3_->height;
				left_color_image_info.header.stamp = now;
	
				left_color_image_publisher_.publish(left_color_image_msg, left_color_image_info);
			}
		
			// Acquire right color image
			if (right_color_camera_)
			{
				//ROS_INFO("[all_cameras] RIGHT");
				/// Release previously acquired IplImage
				if (right_color_image_8U3_)
				{
					cvReleaseImage(&right_color_image_8U3_);
					right_color_image_8U3_ = 0;
				}
		
				/// Acquire new image
				if (right_color_camera_->GetColorImage2(&right_color_image_8U3_, false) & ipa_Utils::RET_FAILED)
				{
					ROS_ERROR("[all_cameras] Right color image acquisition failed");
					break;
				}

				try
		  		{
					right_color_image_msg = *(sensor_msgs::CvBridge::cvToImgMsg(right_color_image_8U3_, "bgr8"));
				}
				catch (const sensor_msgs::CvBridgeException& error)
				{
					ROS_ERROR("[all_cameras] Could not convert right IplImage to ROS message");
					break;
				}
				right_color_image_msg.header.stamp = now;    
		
				right_color_image_info = right_color_camera_info_message_;
				right_color_image_info.width = right_color_image_8U3_->width;
				right_color_image_info.height = right_color_image_8U3_->height;
				right_color_image_info.header.stamp = now;
	
				right_color_image_publisher_.publish(right_color_image_msg, right_color_image_info);
			}
		
			// Acquire image from tof camera
			if (tof_camera_)
			{
				//ROS_INFO("[all_cameras] TOF");
				/// Release previously acquired IplImage
				if (xyz_tof_image_32F3_)
				{
					cvReleaseImage(&xyz_tof_image_32F3_);
					xyz_tof_image_32F3_ = 0;
				}

				if (grey_tof_image_32F1_)
				{
					cvReleaseImage(&grey_tof_image_32F1_);
					grey_tof_image_32F1_ = 0;
				}

				if(tof_camera_->AcquireImages2(0, &grey_tof_image_32F1_, &xyz_tof_image_32F3_, false, false, ipa_CameraSensors::AMPLITUDE) & ipa_Utils::RET_FAILED)
				{
					ROS_ERROR("[all_cameras] Tof image acquisition failed");
					tof_camera_ = 0;
					break;
				}

				try
				{
					xyz_tof_image_msg = *(sensor_msgs::CvBridge::cvToImgMsg(xyz_tof_image_32F3_, "passthrough"));
					grey_tof_image_msg = *(sensor_msgs::CvBridge::cvToImgMsg(grey_tof_image_32F1_, "passthrough"));
				}
				catch (const sensor_msgs::CvBridgeException& error)
				{
					ROS_ERROR("[all_cameras] Could not convert tof IplImage to ROS message");
					break;
				}

				xyz_tof_image_msg.header.stamp = now;
				grey_tof_image_msg.header.stamp = now;

				tof_image_info = tof_camera_info_message_;
				tof_image_info.width = grey_tof_image_32F1_->width;
				tof_image_info.height = grey_tof_image_32F1_->height;
				tof_image_info.header.stamp = now;

				grey_tof_image_publisher_.publish(grey_tof_image_msg, tof_image_info);
				xyz_tof_image_publisher_.publish(xyz_tof_image_msg, tof_image_info);
			}
			
			ros::spinOnce();
			rate.sleep();
	
		} // END while-loop
	}
Example no. 21
void CropDecimateNodelet::imageCb(const sensor_msgs::ImageConstPtr& image_msg,
                                  const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  /// @todo Check image dimensions match info_msg
  /// @todo Publish tweaks to config_ so they appear in reconfigure_gui

  Config config;
  {
    boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
    config = config_;
  }
  int decimation_x = config.decimation_x;
  int decimation_y = config.decimation_y;

  // Compute the ROI we'll actually use
  bool is_bayer = sensor_msgs::image_encodings::isBayer(image_msg->encoding);
  if (is_bayer)
  {
    // Odd offsets for Bayer images basically change the Bayer pattern, but that's
    // unnecessarily complicated to support
    config.x_offset &= ~0x1;
    config.y_offset &= ~0x1;
    config.width &= ~0x1;
    config.height &= ~0x1;    
  }

  int max_width = image_msg->width - config.x_offset;
  int max_height = image_msg->height - config.y_offset;
  int width = config.width;
  int height = config.height;
  if (width == 0 || width > max_width)
    width = max_width;
  if (height == 0 || height > max_height)
    height = max_height;

  // On no-op, just pass the messages along
  if (decimation_x == 1               &&
      decimation_y == 1               &&
      config.x_offset == 0            &&
      config.y_offset == 0            &&
      width  == (int)image_msg->width &&
      height == (int)image_msg->height)
  {
    pub_.publish(image_msg, info_msg);
    return;
  }

  // Get a cv::Mat view of the source data
  CvImageConstPtr source = toCvShare(image_msg);

  // Except in Bayer downsampling case, output has same encoding as the input
  CvImage output(source->header, source->encoding);
  // Apply ROI (no copy, still a view of the image_msg data)
  output.image = source->image(cv::Rect(config.x_offset, config.y_offset, width, height));

  // Special case: when decimating Bayer images, we first do a 2x2 decimation to BGR
  if (is_bayer && (decimation_x > 1 || decimation_y > 1))
  {
    if (decimation_x % 2 != 0 || decimation_y % 2 != 0)
    {
      NODELET_ERROR_THROTTLE(2, "Odd decimation not supported for Bayer images");
      return;
    }

    cv::Mat bgr;
    int step = output.image.step1();
    if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_RGGB8)
      debayer2x2toBGR<uint8_t>(output.image, bgr, 0, 1, step, step + 1);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_BGGR8)
      debayer2x2toBGR<uint8_t>(output.image, bgr, step + 1, 1, step, 0);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GBRG8)
      debayer2x2toBGR<uint8_t>(output.image, bgr, step, 0, step + 1, 1);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GRBG8)
      debayer2x2toBGR<uint8_t>(output.image, bgr, 1, 0, step + 1, step);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_RGGB16)
      debayer2x2toBGR<uint16_t>(output.image, bgr, 0, 1, step, step + 1);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_BGGR16)
      debayer2x2toBGR<uint16_t>(output.image, bgr, step + 1, 1, step, 0);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GBRG16)
      debayer2x2toBGR<uint16_t>(output.image, bgr, step, 0, step + 1, 1);
    else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GRBG16)
      debayer2x2toBGR<uint16_t>(output.image, bgr, 1, 0, step + 1, step);
    else
    {
      NODELET_ERROR_THROTTLE(2, "Unrecognized Bayer encoding '%s'", image_msg->encoding.c_str());
      return;
    }

    output.image = bgr;
    output.encoding = (bgr.depth() == CV_8U) ? sensor_msgs::image_encodings::BGR8
                                             : sensor_msgs::image_encodings::BGR16;
    decimation_x /= 2;
    decimation_y /= 2;
  }

  // Apply further downsampling, if necessary
  if (decimation_x > 1 || decimation_y > 1)
  {
    cv::Mat decimated;

    if (config.interpolation == image_proc::CropDecimate_NN)
    {
      // Use optimized method instead of OpenCV's more general NN resize
      int pixel_size = output.image.elemSize();
      switch (pixel_size)
      {
        // Currently support up through 4-channel float
        case 1:
          decimate<1>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 2:
          decimate<2>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 3:
          decimate<3>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 4:
          decimate<4>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 6:
          decimate<6>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 8:
          decimate<8>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 12:
          decimate<12>(output.image, decimated, decimation_x, decimation_y);
          break;
        case 16:
          decimate<16>(output.image, decimated, decimation_x, decimation_y);
          break;
        default:
          NODELET_ERROR_THROTTLE(2, "Unsupported pixel size, %d bytes", pixel_size);
          return;
      }
    }
    else
    {
      // Linear, cubic, area, ...
      cv::Size size(output.image.cols / decimation_x, output.image.rows / decimation_y);
      cv::resize(output.image, decimated, size, 0.0, 0.0, config.interpolation);
    }

    output.image = decimated;
  }

  // Create output Image message
  /// @todo Could save copies by allocating this above and having output.image alias it
  sensor_msgs::ImagePtr out_image = output.toImageMsg();

  // Create updated CameraInfo message
  sensor_msgs::CameraInfoPtr out_info = boost::make_shared<sensor_msgs::CameraInfo>(*info_msg);
  int binning_x = std::max((int)info_msg->binning_x, 1);
  int binning_y = std::max((int)info_msg->binning_y, 1);
  out_info->binning_x = binning_x * config.decimation_x;
  out_info->binning_y = binning_y * config.decimation_y;
  out_info->roi.x_offset += config.x_offset * binning_x;
  out_info->roi.y_offset += config.y_offset * binning_y;
  out_info->roi.height = height * binning_y;
  out_info->roi.width = width * binning_x;
  // If no ROI specified, leave do_rectify as-is. If ROI specified, set do_rectify = true.
  if (width != (int)image_msg->width || height != (int)image_msg->height)
    out_info->roi.do_rectify = true;
  
  pub_.publish(out_image, out_info);
}
  void callback(const sensor_msgs::ImageConstPtr &img,
                const sensor_msgs::CameraInfoConstPtr &info) {
    boost::mutex::scoped_lock lock(mutex_);
    ros::Time now = ros::Time::now();

    static boost::circular_buffer<double> in_times(100);
    static boost::circular_buffer<double> out_times(100);
    static boost::circular_buffer<double> in_bytes(100);
    static boost::circular_buffer<double> out_bytes(100);

    ROS_DEBUG("resize: callback");
    if ( !publish_once_ || cp_.getNumSubscribers () == 0 ) {
      ROS_DEBUG("resize: number of subscribers is 0, ignoring image");
      return;
    }

    in_times.push_front((now - last_subscribe_time_).toSec());
    in_bytes.push_front(img->data.size());
    //
    try {
        int width = dst_width_ ? dst_width_ : (resize_x_ * info->width);
        int height = dst_height_ ? dst_height_ : (resize_y_ * info->height);
        double scale_x = dst_width_ ? ((double)dst_width_)/info->width : resize_x_;
        double scale_y = dst_height_ ? ((double)dst_height_)/info->height : resize_y_;

        cv_bridge::CvImagePtr cv_img = cv_bridge::toCvCopy(img);

        cv::Mat tmpmat(height, width, cv_img->image.type());
        cv::resize(cv_img->image, tmpmat, cv::Size(width, height));
        cv_img->image = tmpmat;

        sensor_msgs::CameraInfo tinfo = *info;
        tinfo.height = height;
        tinfo.width = width;
        tinfo.K[0] = tinfo.K[0] * scale_x; // fx
        tinfo.K[2] = tinfo.K[2] * scale_x; // cx
        tinfo.K[4] = tinfo.K[4] * scale_y; // fy
        tinfo.K[5] = tinfo.K[5] * scale_y; // cy

        tinfo.P[0] = tinfo.P[0] * scale_x; // fx
        tinfo.P[2] = tinfo.P[2] * scale_x; // cx
        tinfo.P[3] = tinfo.P[3] * scale_x; // T
        tinfo.P[5] = tinfo.P[5] * scale_y; // fy
        tinfo.P[6] = tinfo.P[6] * scale_y; // cy

        if ( !use_messages_ || now - last_publish_time_  > period_ ) {
            cp_.publish(cv_img->toImageMsg(),
                        boost::make_shared<sensor_msgs::CameraInfo> (tinfo));

            out_times.push_front((now - last_publish_time_).toSec());
            out_bytes.push_front(cv_img->image.total()*cv_img->image.elemSize());

            last_publish_time_ = now;
        }
    } catch( cv::Exception& e ) {
        ROS_ERROR("%s", e.what());
    }


    float duration =  (now - last_rosinfo_time_).toSec();
    if ( duration > 2 ) {
        int in_time_n = in_times.size();
        int out_time_n = out_times.size();
        double in_time_mean = 0, in_time_rate = 1.0, in_time_std_dev = 0.0, in_time_max_delta = 0.0, in_time_min_delta = 0.0;
        double out_time_mean = 0, out_time_rate = 1.0, out_time_std_dev = 0.0, out_time_max_delta = 0.0, out_time_min_delta = 0.0;

        std::for_each( in_times.begin(), in_times.end(), (in_time_mean += boost::lambda::_1) );
        in_time_mean /= in_time_n;
        in_time_rate /= in_time_mean;
        std::for_each( in_times.begin(), in_times.end(), (in_time_std_dev += (boost::lambda::_1 - in_time_mean)*(boost::lambda::_1 - in_time_mean) ) );
        in_time_std_dev = sqrt(in_time_std_dev/in_time_n);
        if ( in_time_n > 1 ) {
            in_time_min_delta = *std::min_element(in_times.begin(), in_times.end());
            in_time_max_delta = *std::max_element(in_times.begin(), in_times.end());
        }

        std::for_each( out_times.begin(), out_times.end(), (out_time_mean += boost::lambda::_1) );
        out_time_mean /= out_time_n;
        out_time_rate /= out_time_mean;
        std::for_each( out_times.begin(), out_times.end(), (out_time_std_dev += (boost::lambda::_1 - out_time_mean)*(boost::lambda::_1 - out_time_mean) ) );
        out_time_std_dev = sqrt(out_time_std_dev/out_time_n);
        if ( out_time_n > 1 ) {
            out_time_min_delta = *std::min_element(out_times.begin(), out_times.end());
            out_time_max_delta = *std::max_element(out_times.begin(), out_times.end());
        }

        double in_byte_mean = 0, out_byte_mean = 0;
        std::for_each( in_bytes.begin(), in_bytes.end(), (in_byte_mean += boost::lambda::_1) );
        std::for_each( out_bytes.begin(), out_bytes.end(), (out_byte_mean += boost::lambda::_1) );
        in_byte_mean /= duration;
        out_byte_mean /= duration;

        ROS_INFO_STREAM(" in  bandwidth: " << std::fixed << std::setw(11) << std::setprecision(3)  << in_byte_mean/1000*8
                        << " kbps rate:"   << std::fixed << std::setw(7) << std::setprecision(3) << in_time_rate
                        << " hz min:"      << std::fixed << std::setw(7) << std::setprecision(3) << in_time_min_delta
                        << " s max: "    << std::fixed << std::setw(7) << std::setprecision(3) << in_time_max_delta
                        << " s std_dev: "<< std::fixed << std::setw(7) << std::setprecision(3) << in_time_std_dev << "s n: " << in_time_n);
        ROS_INFO_STREAM(" out bandwidth: " << std::fixed << std::setw(11) << std::setprecision(3)  << out_byte_mean/1000*8
                        << " kbps rate:"   << std::fixed << std::setw(7) << std::setprecision(3) << out_time_rate
                        << " hz min:"      << std::fixed << std::setw(7) << std::setprecision(3) << out_time_min_delta
                        << " s max: "    << std::fixed << std::setw(7) << std::setprecision(3) << out_time_max_delta
                        << " s std_dev: "<< std::fixed << std::setw(7) << std::setprecision(3) << out_time_std_dev << "s n: " << out_time_n);
        in_times.clear();
        in_bytes.clear();
        out_times.clear();
        out_bytes.clear();
        last_rosinfo_time_ = now;
    }

    last_subscribe_time_ = now;

    if(use_snapshot_) {
      publish_once_ = false;
    }
  }
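
The intrinsics update above follows the pinhole model: scaling the image by (scale_x, scale_y) scales fx and cx by scale_x, fy and cy by scale_y, plus the stereo baseline term P[3] by scale_x. A small hypothetical helper stating just that rule for K:

// Scale the 3x3 camera matrix K of a sensor_msgs::CameraInfo after a resize.
void scaleIntrinsics(sensor_msgs::CameraInfo& info, double scale_x, double scale_y)
{
    info.K[0] *= scale_x; // fx
    info.K[2] *= scale_x; // cx
    info.K[4] *= scale_y; // fy
    info.K[5] *= scale_y; // cy
}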
void disparityCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if(cam_pub.getNumSubscribers() > 0 ||
       mask_pub.getNumSubscribers() > 0)
    {
        //    double ticksBefore = cv::getTickCount();
        // Check for common errors in input
        if (msg->min_disparity == 0.0 && msg->max_disparity == 0.0)
        {
            ROS_ERROR("Disparity image fields min_disparity and max_disparity are not set");
            return;
        }
        if (msg->image.encoding != sensor_msgs::image_encodings::TYPE_32FC1)
        {
            ROS_ERROR("Disparity image must be 32-bit floating point (encoding '32FC1'), but has encoding '%s'",
                      msg->image.encoding.c_str());
            return;
        }

        // code taken from image_view / disparity_view
        // Colormap and display the disparity image
        float min_disparity = msg->min_disparity;
        float max_disparity = msg->max_disparity;
        float multiplier = 255.0f / (max_disparity - min_disparity);

        const cv::Mat_<float> dmat(msg->image.height, msg->image.width,
                                   (float*)&msg->image.data[0], msg->image.step);
        cv::Mat disparity_greyscale;
        disparity_greyscale.create(msg->image.height, msg->image.width, CV_8UC1);

        for (int row = 0; row < disparity_greyscale.rows; ++row) {
            const float* d = dmat[row];
            for (int col = 0; col < disparity_greyscale.cols; ++col) {
                int index = (d[col] - min_disparity) * multiplier + 0.5;

                //index = std::min(255, std::max(0, index));
                // pushing it into the interval does not matter because of the threshold afterwards
                if(index >= threshold)
                    disparity_greyscale.at<uchar>(row, col) = 255;
                else
                    disparity_greyscale.at<uchar>(row, col) = 0;
            }
        }

        cv::Mat tmp1, mask;
        cv::erode(disparity_greyscale, tmp1,
                  cv::Mat::ones(erode_size, erode_size, CV_8UC1),
                  cv::Point(-1, -1), erode_iterations);
        cv::dilate(tmp1, mask,
                   cv::Mat::ones(dilate_size, dilate_size, CV_8UC1),
                   cv::Point(-1, -1), dilate_iterations);

        if(mask_pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage mask_msg;
            mask_msg.header = msg->header;
            mask_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
            mask_msg.image = mask;
            mask_pub.publish(mask_msg.toImageMsg());
        }

        if(!image_rect.empty() && cam_pub.getNumSubscribers() > 0)
        {
            cv::Mat masked;
            image_rect.copyTo(masked, mask);
            cv_bridge::CvImage masked_msg;
            masked_msg.header = msg->header;
            masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

            //if we want rescale then rescale
            if(scale_factor > 1)
            {
                cv::Mat masked_tmp = masked;
                cv::resize(masked_tmp, masked,
                           cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
            }
            masked_msg.image = masked;
            // to provide a synchronized output we publish both topics with the same timestamp
            ros::Time currentTime    = ros::Time::now();
            masked_msg.header.stamp  = currentTime;
            camera_info.header.stamp = currentTime;
            cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
        }

        //    ROS_INFO("disparityCb runtime: %f ms",
        //             1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
}
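
The mask threshold above is applied after disparities are mapped onto [0, 255], so its physical meaning depends on min_disparity and max_disparity. Inverting the mapping used in the loop gives the raw disparity a given threshold corresponds to; a small hypothetical helper, not part of the node:

// index = (d - min_disparity) * 255 / (max_disparity - min_disparity) + 0.5,
// so the disparity matching a grey-level threshold is:
float thresholdToDisparity(int threshold, float min_disparity, float max_disparity)
{
    return min_disparity + threshold * (max_disparity - min_disparity) / 255.0f;
}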
Example no. 24
 virtual void publish(sensor_msgs::Image::Ptr& image, sensor_msgs::CameraInfo::Ptr& camera_info)
 {
   publisher_.publish(image, camera_info);
 }
Example no. 25
	/// Continuously advertises xyz and grey images.
	bool spin()
	{
		sensor_msgs::Image::Ptr xyz_image_msg_ptr;
		sensor_msgs::Image::Ptr grey_image_msg_ptr;
		sensor_msgs::CameraInfo tof_image_info;

		ros::Rate rate(10);
		while(node_handle_.ok())
		{
			/// Release previously acquired IplImage
			if (xyz_image_32F3_)
			{
				cvReleaseImage(&xyz_image_32F3_);
				xyz_image_32F3_ = 0;
			}

			if (grey_image_32F1_)
			{
				cvReleaseImage(&grey_image_32F1_);
				grey_image_32F1_ = 0;
			}

			if(tof_camera_->AcquireImages2(0, &grey_image_32F1_, &xyz_image_32F3_, false, false, ipa_CameraSensors::AMPLITUDE) & ipa_Utils::RET_FAILED)
			{
				ROS_ERROR("[tof_camera] Tof image acquisition failed");
				return false;
			}

			try
			{
				xyz_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(xyz_image_32F3_, "passthrough");
			}
			catch (const sensor_msgs::CvBridgeException& error)
			{
				ROS_ERROR("[tof_camera] Could not convert 32bit xyz IplImage to ROS message");
				return false;
			}

			try
			{
				grey_image_msg_ptr = sensor_msgs::CvBridge::cvToImgMsg(grey_image_32F1_, "passthrough");
			}
			catch (const sensor_msgs::CvBridgeException& error)
			{
				ROS_ERROR("[tof_camera] Could not convert 32bit grey IplImage to ROS message");
				return false;
			}

			/// Set time stamp
			ros::Time now = ros::Time::now();
			xyz_image_msg_ptr->header.stamp = now;
			grey_image_msg_ptr->header.stamp = now;

			tof_image_info = camera_info_msg_;
			tof_image_info.width = grey_image_32F1_->width;
			tof_image_info.height = grey_image_32F1_->height;
			tof_image_info.header.stamp = now;

			/// Publish message
			xyz_image_publisher_.publish(*xyz_image_msg_ptr, tof_image_info);
			grey_image_publisher_.publish(*grey_image_msg_ptr, tof_image_info);

			ros::spinOnce();
			rate.sleep();
		}
		return true;
	}
Example no. 26
 virtual void publish(sensor_msgs::Image::Ptr& image, sensor_msgs::CameraInfo::Ptr& camera_info)
 {
   if(active_publisher_ != 0)
     active_publisher_->publish(image, camera_info);
 }
Example no. 27
 void publishImage(tPvFrame* frame)
 {
   if (processFrame(frame, img_, cam_info_))
     streaming_pub_.publish(img_, cam_info_);
 }
Example no. 28
void CropDecimateNodelet::imageCb(const sensor_msgs::ImageConstPtr& image_msg,
                                  const sensor_msgs::CameraInfoConstPtr& info_msg)
{
  /// @todo Check image dimensions match info_msg
  /// @todo Publish tweaks to config_ so they appear in reconfigure_gui

  Config config;
  {
    boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
    config = config_;
  }
  
  /// @todo Could support odd offsets for Bayer images, but it's a tad complicated
  config.x_offset = (config.x_offset / 2) * 2;
  config.y_offset = (config.y_offset / 2) * 2;

  int max_width = image_msg->width - config.x_offset;
  int max_height = image_msg->height - config.y_offset;
  int width = config.width;
  int height = config.height;
  if (width == 0 || width > max_width)
    width = max_width;
  if (height == 0 || height > max_height)
    height = max_height;

  // On no-op, just pass the messages along
  if (config.decimation_x == 1  &&
      config.decimation_y == 1  &&
      config.x_offset == 0      &&
      config.y_offset == 0      &&
      width  == (int)image_msg->width &&
      height == (int)image_msg->height)
  {
    pub_.publish(image_msg, info_msg);
    return;
  }

  /// @todo Support 16-bit encodings
  if (sensor_msgs::image_encodings::bitDepth(image_msg->encoding) != 8)
  {
    NODELET_ERROR_THROTTLE(2, "Only 8-bit encodings are currently supported");
    return;
  }
  
  // Create updated CameraInfo message
  sensor_msgs::CameraInfoPtr out_info = boost::make_shared<sensor_msgs::CameraInfo>(*info_msg);
  int binning_x = std::max((int)info_msg->binning_x, 1);
  int binning_y = std::max((int)info_msg->binning_y, 1);
  out_info->binning_x = binning_x * config.decimation_x;
  out_info->binning_y = binning_y * config.decimation_y;
  out_info->roi.x_offset += config.x_offset * binning_x;
  out_info->roi.y_offset += config.y_offset * binning_y;
  out_info->roi.height = height * binning_y;
  out_info->roi.width = width * binning_x;
  // If no ROI specified, leave do_rectify as-is. If ROI specified, set do_rectify = true.
  if (width != (int)image_msg->width || height != (int)image_msg->height)
    out_info->roi.do_rectify = true;

  // Create new image message
  sensor_msgs::ImagePtr out_image = boost::make_shared<sensor_msgs::Image>();
  out_image->header = image_msg->header;
  out_image->height = height / config.decimation_y;
  out_image->width  = width  / config.decimation_x;
  // Don't know encoding, step, or data size yet

  if (config.decimation_x == 1 && config.decimation_y == 1)
  {
    // Crop only, preserving original encoding
    int num_channels = sensor_msgs::image_encodings::numChannels(image_msg->encoding);
    out_image->encoding = image_msg->encoding;
    out_image->step = out_image->width * num_channels;
    out_image->data.resize(out_image->height * out_image->step);

    const uint8_t* input_buffer = &image_msg->data[config.y_offset*image_msg->step + config.x_offset*num_channels];
    uint8_t* output_buffer = &out_image->data[0];
    for (int y = 0; y < (int)out_image->height; ++y)
    {
      memcpy(output_buffer, input_buffer, out_image->step);
      input_buffer += image_msg->step;
      output_buffer += out_image->step;
    }
  }
  else if (sensor_msgs::image_encodings::isMono(image_msg->encoding))
  {
    // Output is also monochrome
    out_image->encoding = sensor_msgs::image_encodings::MONO8;
    out_image->step = out_image->width;
    out_image->data.resize(out_image->height * out_image->step);

    // Hit only the pixel groups we need
    int input_step = config.decimation_y * image_msg->step;
    int input_skip = config.decimation_x;
    const uint8_t* input_row = &image_msg->data[config.y_offset*image_msg->step + config.x_offset];
    uint8_t* output_buffer = &out_image->data[0];

    // Downsample
    for (int y = 0; y < (int)out_image->height; ++y, input_row += input_step)
    {
      const uint8_t* input_buffer = input_row;
      for (int x = 0; x < (int)out_image->width; ++x, input_buffer += input_skip, ++output_buffer)
        *output_buffer = *input_buffer;
    }
  }
  else
  {
    // Output is color
    out_image->encoding = sensor_msgs::image_encodings::BGR8;
    out_image->step     = out_image->width * 3;
    out_image->data.resize(out_image->height * out_image->step);

    if (sensor_msgs::image_encodings::isBayer(image_msg->encoding))
    {
      if (config.decimation_x % 2 != 0 || config.decimation_y % 2 != 0)
      {
        NODELET_ERROR_THROTTLE(2, "Odd decimation not supported for Bayer images");
        return;
      }
      
      // Compute offsets to color elements
      int R, G1, G2, B;
      if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_RGGB8)
      {
        R  = 0;
        G1 = 1;
        G2 = image_msg->step;
        B  = image_msg->step + 1;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_BGGR8)
      {
        B  = 0;
        G1 = 1;
        G2 = image_msg->step;
        R  = image_msg->step + 1;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GBRG8)
      {
        G1 = 0;
        B  = 1;
        R  = image_msg->step;
        G2 = image_msg->step + 1;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::BAYER_GRBG8)
      {
        G1 = 0;
        R  = 1;
        B  = image_msg->step;
        G2 = image_msg->step + 1;
      }
      else
      {
        NODELET_ERROR_THROTTLE(2, "Unrecognized Bayer encoding '%s'", image_msg->encoding.c_str());
        return;
      }

      // Hit only the pixel groups we need
      int input_step = config.decimation_y * image_msg->step;
      int input_skip = config.decimation_x;
      const uint8_t* input_row = &image_msg->data[config.y_offset*image_msg->step + config.x_offset];
      uint8_t* output_buffer = &out_image->data[0];

      // Downsample and debayer at once
      for (int y = 0; y < (int)out_image->height; ++y, input_row += input_step)
      {
        const uint8_t* input_buffer = input_row;
        for (int x = 0; x < (int)out_image->width; ++x, input_buffer += input_skip, output_buffer += 3)
        {
          output_buffer[0] = input_buffer[B];
          output_buffer[1] = (input_buffer[G1] + input_buffer[G2]) / 2;
          output_buffer[2] = input_buffer[R];
        }
      }
    }
    else
    {
      // Support RGB-type encodings
      int R, G, B, channels;
      if (image_msg->encoding == sensor_msgs::image_encodings::BGR8)
      {
        B = 0;
        G = 1;
        R = 2;
        channels = 3;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::RGB8)
      {
        R = 0;
        G = 1;
        B = 2;
        channels = 3;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::BGRA8)
      {
        B = 0;
        G = 1;
        R = 2;
        channels = 4;
      }
      else if (image_msg->encoding == sensor_msgs::image_encodings::RGBA8)
      {
        R = 0;
        G = 1;
        B = 2;
        channels = 4;
      }
      else
      {
        NODELET_ERROR_THROTTLE(2, "Unsupported encoding '%s'", image_msg->encoding.c_str());
        return;
      }

      // Hit only the pixel groups we need
      int input_step = config.decimation_y * image_msg->step;
      int input_skip = config.decimation_x * channels;
      const uint8_t* input_row = &image_msg->data[config.y_offset*image_msg->step + config.x_offset*channels];
      uint8_t* output_buffer = &out_image->data[0];

      // Downsample
      for (int y = 0; y < (int)out_image->height; ++y, input_row += input_step)
      {
        const uint8_t* input_buffer = input_row;
        for (int x = 0; x < (int)out_image->width; ++x, input_buffer += input_skip, output_buffer += 3)
        {
          output_buffer[0] = input_buffer[B];
          output_buffer[1] = input_buffer[G];
          output_buffer[2] = input_buffer[R];
        }
      }

    }
  }

  pub_.publish(out_image, out_info);
}
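
As a nodelet, this class is loaded through pluginlib rather than linked into a node, so the package must export it somewhere; assuming the image_proc namespace visible in the code above, the registration is a one-liner:

#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>

// Register CropDecimateNodelet so the nodelet loader can find it by name.
PLUGINLIB_EXPORT_CLASS(image_proc::CropDecimateNodelet, nodelet::Nodelet)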
Example no. 29
int CameraNode::run()
{
    ros::WallTime t_prev;
    int dt_avg;

    // reset timing information
    dt_avg = 0;
    t_prev = ros::WallTime::now();

    while (ros::ok()) {
        std::vector<uint8_t> data(step*height);

        CamContext_grab_next_frame_blocking(cc,&data[0],-1.0f); // never timeout

        int errnum = cam_iface_have_error();
        if (errnum == CAM_IFACE_FRAME_TIMEOUT) {
            cam_iface_clear_error();
            continue; // wait again
        }

        if (errnum == CAM_IFACE_FRAME_DATA_MISSING_ERROR) {
            cam_iface_clear_error();
        } else if (errnum == CAM_IFACE_FRAME_INTERRUPTED_SYSCALL) {
            cam_iface_clear_error();
        } else if (errnum == CAM_IFACE_FRAME_DATA_CORRUPT_ERROR) {
            cam_iface_clear_error();
        } else {
            _check_error();

            if (!_got_frame) {
                ROS_INFO("receiving images");
                _got_frame = true;
            }

            if(dt_avg++ == 9) {
                ros::WallTime t_now = ros::WallTime::now();
                ros::WallDuration dur = t_now - t_prev;
                t_prev = t_now;

                std_msgs::Float32 rate;
                rate.data = 10 / dur.toSec();
                _pub_rate.publish(rate);

                dt_avg = 0;
            }

            sensor_msgs::Image msg;
            if (_host_timestamp) {
                msg.header.stamp = ros::Time::now();
            } else {
                double timestamp;
                CamContext_get_last_timestamp(cc,&timestamp);
                _check_error();
                msg.header.stamp = ros::Time(timestamp);
            }

            unsigned long framenumber;
            CamContext_get_last_framenumber(cc,&framenumber);
            _check_error();

            msg.header.seq = framenumber;
            msg.header.frame_id = "0";
            msg.height = height;
            msg.width = width;
            msg.encoding = encoding;
            msg.step = step;
            msg.data = data;

            // get current CameraInfo data
            cam_info = cam_info_manager->getCameraInfo();
            cam_info.header.stamp = msg.header.stamp;
            cam_info.header.seq = msg.header.seq;
            cam_info.header.frame_id = msg.header.frame_id;
            cam_info.height = height;
            cam_info.width = width;

            _pub_image.publish(msg, cam_info);
        }

        ros::spinOnce();
    }

    CamContext_stop_camera(cc);
    cam_iface_shutdown();
    return 0;
}
int getNumRGBSubscribers()
{
    return realsense_reg_points_pub.getNumSubscribers() + realsense_rgb_image_pub.getNumSubscribers();
}
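
Counters like these are typically checked once per frame so that message construction is skipped when nobody is listening; a minimal gating sketch built on the functions above (the surrounding frame loop is assumed):

    // Sketch: publish lazily, only when at least one subscriber exists.
    if (getNumRGBSubscribers() > 0)
        pubRealSenseRGBImageMsg(rgb_mat);
    if (getNumDepthSubscribers() > 0)
        pubRealSenseDepthImageMsg32F(depth_mat);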