Example #1
	void callback(const sensor_msgs::ImageConstPtr& depth)
	{
		if(depth->encoding.compare(sensor_msgs::image_encodings::TYPE_16UC1)!=0 &&
		   depth->encoding.compare(sensor_msgs::image_encodings::TYPE_32FC1)!=0 &&
		   depth->encoding.compare(sensor_msgs::image_encodings::MONO16)!=0)
		{
			NODELET_ERROR("Input type depth=32FC1,16UC1,MONO16");
			return;
		}

		if(pub_.getNumSubscribers())
		{
			if(depth->width == model_.getWidth() && depth->height == model_.getHeight())
			{
				cv_bridge::CvImagePtr imageDepthPtr = cv_bridge::toCvCopy(depth);
				model_.undistort(imageDepthPtr->image);
				pub_.publish(imageDepthPtr->toImageMsg());
			}
			else
			{
				NODELET_ERROR("Input depth image size (%dx%d) and distortion model "
						"size (%dx%d) don't match! Cannot undistort image.",
						depth->width, depth->height,
						model_.getWidth(), model_.getHeight());
			}
		}
	}
Example #2
	virtual void onInit()
	{
		ros::NodeHandle & nh = getNodeHandle();
		ros::NodeHandle & pnh = getPrivateNodeHandle();

		std::string modelPath;
		pnh.param("model", modelPath, modelPath);

		if(modelPath.empty())
		{
			NODELET_ERROR("undistort_depth: \"model\" parameter should be set!");
			return;
		}

		model_.load(modelPath);
		if(!model_.isValid())
		{
			NODELET_ERROR("Loaded distortion model from \"%s\" is not valid!", modelPath.c_str());
		}
		else
		{
			image_transport::ImageTransport it(nh);
			sub_ = it.subscribe("depth", 1, &UndistortDepth::callback, this);
			pub_ = it.advertise(uFormat("%s_undistorted", nh.resolveName("depth").c_str()), 1);
		}
	}
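Examples #1 and #2 are two halves of the same nodelet. As a rough sketch of the class they plug into (member declarations inferred from the snippets; the distortion-model type is a made-up stand-in, and uFormat is assumed to be a printf-style string helper):

#include <nodelet/nodelet.h>
#include <pluginlib/class_list_macros.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>

class UndistortDepth : public nodelet::Nodelet
{
private:
	virtual void onInit();                                   // Example #2
	void callback(const sensor_msgs::ImageConstPtr& depth);  // Example #1

	DepthDistortionModel model_;  // hypothetical type with load()/isValid()/undistort()/getWidth()/getHeight()
	image_transport::Subscriber sub_;
	image_transport::Publisher pub_;
};

// Register the nodelet so a nodelet manager can load it as a plugin.
PLUGINLIB_EXPORT_CLASS(UndistortDepth, nodelet::Nodelet)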
Example #3
  bool PolygonPointsSampler::isValidMessage(
    const jsk_recognition_msgs::PolygonArray::ConstPtr& polygon_msg,
    const jsk_recognition_msgs::ModelCoefficientsArray::ConstPtr& coefficients_msg)
  {
    if (polygon_msg->polygons.size() == 0) {
      NODELET_DEBUG("empty polygons");
      return false;
    }
    if (coefficients_msg->coefficients.size() != polygon_msg->polygons.size()) {
      NODELET_ERROR("The size of coefficients and polygons are not same");
      return false;
    }

    std::string frame_id = polygon_msg->header.frame_id;
    for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
      if (frame_id != polygon_msg->polygons[i].header.frame_id) {
        NODELET_ERROR("Frame id of polygon is not same: %s, %s",
                      frame_id.c_str(),
                      polygon_msg->polygons[i].header.frame_id.c_str());
        return false;
      }
    }
    for (size_t i = 0; i < coefficients_msg->coefficients.size(); i++) {
      if (frame_id != coefficients_msg->coefficients[i].header.frame_id) {
        NODELET_ERROR("Frame id of coefficient is not same: %s, %s",
                      frame_id.c_str(),
                      coefficients_msg->coefficients[i].header.frame_id.c_str());
        return false;
      }
    }
    return true;
  }
Example #4
  virtual void onInit()
  {
    nh_ = getNodeHandle();
    it_ = boost::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(nh_));
    local_nh_ = ros::NodeHandle("~");

    local_nh_.param("debug_view", debug_view_, false);
    subscriber_count_ = 0;
    prev_stamp_ = ros::Time(0, 0);

    image_transport::SubscriberStatusCallback img_connect_cb    = boost::bind(&FaceDetectionNodelet::img_connectCb, this, _1);
    image_transport::SubscriberStatusCallback img_disconnect_cb = boost::bind(&FaceDetectionNodelet::img_disconnectCb, this, _1);
    ros::SubscriberStatusCallback msg_connect_cb    = boost::bind(&FaceDetectionNodelet::msg_connectCb, this, _1);
    ros::SubscriberStatusCallback msg_disconnect_cb = boost::bind(&FaceDetectionNodelet::msg_disconnectCb, this, _1);
    img_pub_ = image_transport::ImageTransport(local_nh_).advertise("image", 1, img_connect_cb, img_disconnect_cb);
    msg_pub_ = local_nh_.advertise<opencv_apps::FaceArrayStamped>("faces", 1, msg_connect_cb, msg_disconnect_cb);
        
    if( debug_view_ ) {
      subscriber_count_++;
    }
    std::string face_cascade_name, eyes_cascade_name;
    local_nh_.param("face_cascade_name", face_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"));
    local_nh_.param("eyes_cascade_name", eyes_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml"));
        
    if( !face_cascade_.load( face_cascade_name ) ){ NODELET_ERROR("Error loading face cascade: %s", face_cascade_name.c_str()); }
    if( !eyes_cascade_.load( eyes_cascade_name ) ){ NODELET_ERROR("Error loading eyes cascade: %s", eyes_cascade_name.c_str()); }

    dynamic_reconfigure::Server<face_detection::FaceDetectionConfig>::CallbackType f =
      boost::bind(&FaceDetectionNodelet::reconfigureCallback, this, _1, _2);
    srv.setCallback(f);
  }
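The img_connect_cb/img_disconnect_cb pair registered above typically implements lazy subscription: the input image is only subscribed while the outputs have listeners. A sketch of what those callbacks might look like (input topic, queue size, img_sub_ member and imageCallback name are assumptions):

  void img_connectCb(const image_transport::SingleSubscriberPublisher&)
  {
    if (subscriber_count_++ == 0) {
      // First subscriber appeared: start listening to the camera.
      img_sub_ = it_->subscribe("image", 3, &FaceDetectionNodelet::imageCallback, this);
    }
  }

  void img_disconnectCb(const image_transport::SingleSubscriberPublisher&)
  {
    if (--subscriber_count_ == 0) {
      // Last subscriber left: stop processing images.
      img_sub_.shutdown();
    }
  }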
Example #5
void
jsk_pcl_ros::Filter::input_indices_callback (const PointCloud2::ConstPtr &cloud, const PointIndicesConstPtr &indices)
{
  // No subscribers, no work
  if (pub_output_.getNumSubscribers () <= 0)
    return;

  // If cloud is given, check if it's valid
  if (!isValid (cloud))
  {
    NODELET_ERROR ("[%s::input_indices_callback] Invalid input!", getName ().c_str ());
    return;
  }
  // If indices are given, check if they are valid
  if (indices && !isValid (indices))
  {
    NODELET_ERROR ("[%s::input_indices_callback] Invalid indices!", getName ().c_str ());
    return;
  }

  /// DEBUG
  if (indices)
    NODELET_DEBUG ("[%s::input_indices_callback]\n"
                   "                                 - PointCloud with %d data points (%s), stamp %f, and frame %s on topic %s received.\n"
                   "                                 - PointIndices with %zu values, stamp %f, and frame %s on topic %s received.",
                   getName ().c_str (), 
                   cloud->width * cloud->height, pcl::getFieldsList (*cloud).c_str (), cloud->header.stamp.toSec (), cloud->header.frame_id.c_str (), pnh_->resolveName ("input").c_str (),
                   indices->indices.size (), indices->header.stamp.toSec (), indices->header.frame_id.c_str (), pnh_->resolveName ("indices").c_str ());
  else
    NODELET_DEBUG ("[%s::input_indices_callback] PointCloud with %d data points and frame %s on topic %s received.", getName ().c_str (), cloud->width * cloud->height, cloud->header.frame_id.c_str (), pnh_->resolveName ("input").c_str ());
  ///

  // Check whether the user has given a different input TF frame
  tf_input_orig_frame_ = cloud->header.frame_id;
  PointCloud2::ConstPtr cloud_tf;
  if (!tf_input_frame_.empty () && cloud->header.frame_id != tf_input_frame_)
  {
    NODELET_DEBUG ("[%s::input_indices_callback] Transforming input dataset from %s to %s.", getName ().c_str (), cloud->header.frame_id.c_str (), tf_input_frame_.c_str ());
    // Convert the cloud into the target frame
    // (the original frame ID was already saved above)
    PointCloud2 cloud_transformed;
    tf_listener_.waitForTransform(cloud->header.frame_id, tf_input_frame_, cloud->header.stamp, ros::Duration(5.0)); 
    if (!pcl_ros::transformPointCloud (tf_input_frame_, *cloud, cloud_transformed, tf_listener_))
    {
      NODELET_ERROR ("[%s::input_indices_callback] Error converting input dataset from %s to %s.", getName ().c_str (), cloud->header.frame_id.c_str (), tf_input_frame_.c_str ());
      return;
    }
    cloud_tf = boost::make_shared<PointCloud2> (cloud_transformed);
  }
  else
    cloud_tf = cloud;

  // Need setInputCloud () here because we have to extract x/y/z
  IndicesPtr vindices;
  if (indices)
    vindices.reset (new std::vector<int> (indices->indices));

  computePublish (cloud_tf, vindices);
}
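For this callback to receive a cloud and its matching indices as a pair, the class has to synchronize two subscriptions by timestamp. A sketch of that wiring with message_filters (member names and the pcl_msgs::PointIndices type are assumptions; the real pcl_ros Filter base also offers an exact-time policy selected by a parameter):

  // Assumed members inside the Filter class:
  typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::PointCloud2, pcl_msgs::PointIndices> SyncPolicy;
  message_filters::Subscriber<sensor_msgs::PointCloud2> sub_input_filter_;
  message_filters::Subscriber<pcl_msgs::PointIndices> sub_indices_filter_;
  boost::shared_ptr<message_filters::Synchronizer<SyncPolicy> > sync_;

  void subscribeInputAndIndices(int max_queue_size)
  {
    sub_input_filter_.subscribe(*pnh_, "input", max_queue_size);
    sub_indices_filter_.subscribe(*pnh_, "indices", max_queue_size);
    sync_.reset(new message_filters::Synchronizer<SyncPolicy>(
        SyncPolicy(max_queue_size), sub_input_filter_, sub_indices_filter_));
    sync_->registerCallback(
        boost::bind(&Filter::input_indices_callback, this, _1, _2));
  }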
Example #6
 void CoreStatus::exit()
 {
   if (service::waitForService ("roah_rsbb_shutdown", 500)) {
     std_srvs::Empty s;
     if (! service::call ("roah_rsbb_shutdown", s)) {
       NODELET_ERROR ("Error calling roah_rsbb_shutdown service");
     }
   }
   else {
     NODELET_ERROR ("Could not find roah_rsbb_shutdown service");
   }
 }
Example #7
  void TiltLaserListener::onInit()
  {
    DiagnosticNodelet::onInit();

    if (pnh_->hasParam("joint_name")) {
      pnh_->getParam("joint_name", joint_name_);
    }
    else {
      NODELET_ERROR("no ~joint_state is specified");
      return;
    }
    pnh_->param("overwrap_angle", overwrap_angle_, 0.0);
    std::string laser_type;
    pnh_->param("laser_type", laser_type, std::string("tilt_half_down"));
    if (laser_type == "tilt_half_up") {
      laser_type_ = TILT_HALF_UP;
    }
    else if (laser_type == "tilt_half_down") {
      laser_type_ = TILT_HALF_DOWN;
    }
    else if (laser_type == "tilt") {
      laser_type_ = TILT;
    }
    else if (laser_type == "infinite_spindle") {
      laser_type_ = INFINITE_SPINDLE;
    }
    else if (laser_type == "infinite_spindle_half") {
      laser_type_ = INFINITE_SPINDLE_HALF;
    }
    else {
      NODELET_ERROR("unknown ~laser_type: %s", laser_type.c_str());
      return;
    }
    pnh_->param("use_laser_assembler", use_laser_assembler_, false);
    if (use_laser_assembler_) {
      assemble_cloud_srv_
        = pnh_->serviceClient<laser_assembler::AssembleScans2>("assemble_scans2");
      cloud_pub_
        = advertise<sensor_msgs::PointCloud2>(*pnh_, "output_cloud", 1);
    }
    prev_angle_ = 0;
    prev_velocity_ = 0;
    start_time_ = ros::Time::now();
    clear_cache_service_ = pnh_->advertiseService(
      "clear_cache", &TiltLaserListener::clearCacheCallback,
      this);
    trigger_pub_ = advertise<jsk_recognition_msgs::TimeRange>(*pnh_, "output", 1);
    
  }
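When use_laser_assembler_ is enabled, the published time range is typically also turned into a cloud by calling the assemble_scans2 service. A hedged sketch of that call (the surrounding trigger bookkeeping is omitted, and this helper name is made up):

    void publishAssembledCloud(const ros::Time& start, const ros::Time& end)
    {
      laser_assembler::AssembleScans2 srv;
      srv.request.begin = start;
      srv.request.end = end;
      if (assemble_cloud_srv_.call(srv)) {
        // The response contains all scans in [begin, end] merged into one cloud.
        cloud_pub_.publish(srv.response.cloud);
      }
      else {
        NODELET_ERROR("failed to call assemble_scans2");
      }
    }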
Example #8
 void CloudOnPlane::predicate(const sensor_msgs::PointCloud2::ConstPtr& cloud_msg,
                              const jsk_recognition_msgs::PolygonArray::ConstPtr& polygon_msg)
 {
   boost::mutex::scoped_lock lock(mutex_);
   vital_checker_->poke();
   // check header
   if (!jsk_recognition_utils::isSameFrameId(*cloud_msg, *polygon_msg)) {
     NODELET_ERROR("frame_id does not match: cloud: %s, polygon: %s",
                       cloud_msg->header.frame_id.c_str(), polygon_msg->header.frame_id.c_str());
     return;
   }
   pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
   pcl::fromROSMsg(*cloud_msg, *cloud);
   // convert jsk_recognition_msgs::PolygonArray to jsk_recognition_utils::Polygon
   std::vector<jsk_recognition_utils::ConvexPolygon::Ptr> polygons;
   for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
     polygons.push_back(jsk_recognition_utils::ConvexPolygon::fromROSMsgPtr(polygon_msg->polygons[i].polygon));
   }
   
   for (size_t i = 0; i < polygon_msg->polygons.size(); i++) {
     jsk_recognition_utils::ConvexPolygon::Ptr poly = polygons[i];
     for (size_t j = 0; j < cloud->points.size(); j++) {
       Eigen::Vector3f p = cloud->points[j].getVector3fMap();
       if (poly->distanceSmallerThan(p, distance_thr_)) {
         buffer_->addValue(false);
         publishPredicate(cloud_msg->header, !buffer_->isAllTrueFilled());
         return;
       }
     }
   }
   buffer_->addValue(true);
   publishPredicate(cloud_msg->header, !buffer_->isAllTrueFilled());
 }
Example #9
 void DepthCalibration::calibrate(
     const sensor_msgs::Image::ConstPtr& msg,
     const sensor_msgs::CameraInfo::ConstPtr& camera_info)
 {
   boost::mutex::scoped_lock lock(mutex_);
   cv_bridge::CvImagePtr cv_ptr;
   try
   {
     cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::TYPE_32FC1);
   }
   catch (cv_bridge::Exception& e)
   {
     NODELET_ERROR("cv_bridge exception: %s", e.what());
     return;
   }
   cv::Mat image = cv_ptr->image;
   cv::Mat output_image = image.clone();
   double cu = camera_info->P[2];
   double cv = camera_info->P[6];
   for(int v = 0; v < image.rows; v++) {
     for(int u = 0; u < image.cols; u++) {
       float z = image.at<float>(v, u);
       if (isnan(z)) {
         output_image.at<float>(v, u) = z;
       }
       else {
         output_image.at<float>(v, u) = applyModel(z, u, v, cu, cv);
       }
       //NODELET_INFO("z: %f", z);
     }
   }
   sensor_msgs::Image::Ptr ros_image = cv_bridge::CvImage(msg->header, "32FC1", output_image).toImageMsg();
   pub_.publish(ros_image);
 }
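applyModel itself is defined elsewhere in the class. Purely as an illustration of the interface, a hypothetical correction could look like the following; the coefficients and functional form are made up, not jsk_pcl_ros' actual calibration model (which also uses the pixel coordinates u, v and the principal point cu, cv):

   double applyModel(double z, int u, int v, double cu, double cv) const
   {
     (void)u; (void)v; (void)cu; (void)cv;  // unused in this simplified sketch
     const double c2 = -0.006, c1 = 1.02, c0 = -0.01;  // made-up coefficients
     return c2 * z * z + c1 * z + c0;  // quadratic correction in z
   }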
Example #10
  void PlaneSupportedCuboidEstimator::updateParticlePolygonRelationship(
    ParticleCloud::Ptr particles)
  {
    if (!latest_polygon_msg_ || latest_polygon_msg_->polygons.size() == 0) {
      NODELET_ERROR("no valid polygons, skip update relationship");
      return;
    }

    // The order of convexes and polygons_ should be same
    // because it is inside of critical section.
    std::vector<ConvexPolygon::Ptr> convexes(latest_polygon_msg_->polygons.size());
    for (size_t i = 0; i < latest_polygon_msg_->polygons.size(); i++) {
      ConvexPolygon::Ptr polygon = ConvexPolygon::fromROSMsgPtr(latest_polygon_msg_->polygons[i].polygon);
      polygon->decomposeToTriangles();
      convexes[i] = polygon;
    }

    // Second, compute distances and assign the nearest polygon to each particle.
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (size_t i = 0; i < particles->points.size(); i++) {
      size_t nearest_index = getNearestPolygon(particles->points[i], convexes);
      //particles->points[i].plane = polygons[nearest_index];
      particles->points[i].plane_index = (int)nearest_index;
    }
  }
Example #11
 void ProjectImagePoint::project(
   const geometry_msgs::PointStamped::ConstPtr& msg)
 {
   vital_checker_->poke();
   boost::mutex::scoped_lock lock(mutex_);
   if (!camera_info_) {
     NODELET_WARN(
       "[ProjectImagePoint::project] camera info is not yet available");
     return;
   }
   image_geometry::PinholeCameraModel model;
   model.fromCameraInfo(camera_info_);
   cv::Point3d ray = model.projectPixelTo3dRay(
     cv::Point2d(msg->point.x, msg->point.y));
   geometry_msgs::Vector3Stamped vector;
   vector.header = msg->header;
   vector.header.frame_id = camera_info_->header.frame_id;
   vector.vector.x = ray.x;
   vector.vector.y = ray.y;
   vector.vector.z = ray.z;
   pub_vector_.publish(vector);
   if (ray.z == 0.0) {
     NODELET_ERROR("Z value of projected ray is 0");
     return;
   }
   double alpha = z_ / ray.z;
   geometry_msgs::PointStamped point;
   point.header = msg->header;
   point.header.frame_id = camera_info_->header.frame_id;
   point.point.x = ray.x * alpha;
   point.point.y = ray.y * alpha;
   point.point.z = ray.z * alpha;
   pub_.publish(point);
   
 }
Example #12
 void LabDecomposer::decompose(
   const sensor_msgs::Image::ConstPtr& image_msg)
 {
   cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(
     image_msg, image_msg->encoding);
   cv::Mat image = cv_ptr->image;
   cv::Mat lab_image;
   std::vector<cv::Mat> lab_planes;
   if (image_msg->encoding == sensor_msgs::image_encodings::BGR8) {
     cv::cvtColor(image, lab_image, CV_BGR2Lab);
   }
   else if (image_msg->encoding == sensor_msgs::image_encodings::RGB8) {
     cv::cvtColor(image, lab_image, CV_RGB2Lab);
   }
   else {
     NODELET_ERROR("unsupported format to Lab: %s", image_msg->encoding.c_str());
     return;
   }
   cv::split(lab_image, lab_planes);
   cv::Mat l = lab_planes[0];
   cv::Mat a = lab_planes[1];
   cv::Mat b = lab_planes[2];
   pub_l_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    l).toImageMsg());
   pub_a_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    a).toImageMsg());
   pub_b_.publish(cv_bridge::CvImage(
                    image_msg->header,
                    sensor_msgs::image_encodings::MONO8,
                    b).toImageMsg());
 }
Example #13
 void NormalDirectionFilter::onInit()
 {
   DiagnosticNodelet::onInit();
   pnh_->param("use_imu", use_imu_, false);
   if (!use_imu_) {
     std::vector<double> direction;
     if (!jsk_topic_tools::readVectorParameter(*pnh_, "direction", direction)) {
       NODELET_ERROR("You need to specify ~direction");
       return;
     }
      jsk_recognition_utils::pointFromVectorToVector<std::vector<double>, Eigen::Vector3f>(
        direction, static_direction_);
   }
   else {
     tf_listener_ = TfListenerSingleton::getInstance();
   }
   
   srv_ = boost::make_shared <dynamic_reconfigure::Server<Config> > (*pnh_);
   dynamic_reconfigure::Server<Config>::CallbackType f =
     boost::bind (
       &NormalDirectionFilter::configCallback, this, _1, _2);
   srv_->setCallback (f);
   pnh_->param("queue_size", queue_size_, 200);
   pub_ = advertise<PCLIndicesMsg>(*pnh_, "output", 1);
   onInitPostProcess();
 }
Example #14
void DriverNodelet::publishRgbImage(const ImageBuffer& image, ros::Time time) const
{
  sensor_msgs::ImagePtr rgb_msg = boost::make_shared<sensor_msgs::Image >();
  rgb_msg->header.stamp = time;
  rgb_msg->header.frame_id = rgb_frame_id_;
  rgb_msg->height = image.metadata.height;
  rgb_msg->width = image.metadata.width;
  switch(image.metadata.video_format) {
    case FREENECT_VIDEO_RGB:
      rgb_msg->encoding = sensor_msgs::image_encodings::RGB8;
      rgb_msg->step = rgb_msg->width * 3;
      break;
    case FREENECT_VIDEO_BAYER:
      rgb_msg->encoding = sensor_msgs::image_encodings::BAYER_GRBG8;
      rgb_msg->step = rgb_msg->width;
      break;
    case FREENECT_VIDEO_YUV_RGB:
      rgb_msg->encoding = sensor_msgs::image_encodings::YUV422;
      rgb_msg->step = rgb_msg->width * 2;
      break;
    default:
      NODELET_ERROR("Unknown RGB image format received from libfreenect");
      // Unknown encoding -- don't publish
      return;
  }
  rgb_msg->data.resize(rgb_msg->height * rgb_msg->step);
  fillImage(image, reinterpret_cast<void*>(&rgb_msg->data[0]));
  
  pub_rgb_.publish(rgb_msg, getRgbCameraInfo(image, time));
  if (enable_rgb_diagnostics_)
      pub_rgb_freq_->tick();
}
Example #15
void DriverNodelet::rgbConnectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  bool need_rgb = pub_rgb_.getNumSubscribers() > 0;
  
  if (need_rgb && !device_->isImageStreamRunning())
  {
    // Can't stream IR and RGB at the same time. Give RGB preference.
    if (device_->isIRStreamRunning())
    {
      NODELET_ERROR("Cannot stream RGB and IR at the same time. Streaming RGB only.");
      device_->stopIRStream();
    }
    
    device_->startImageStream();
    startSynchronization();
    time_stamp_ = ros::Time(0,0); // starting an additional stream blocks for a while, could upset watchdog
  }
  else if (!need_rgb && device_->isImageStreamRunning())
  {
    stopSynchronization();
    device_->stopImageStream();

    // Start IR if it's been blocked on RGB subscribers
    bool need_ir = pub_ir_.getNumSubscribers() > 0;
    if (need_ir && !device_->isIRStreamRunning())
    {
      device_->startIRStream();
      time_stamp_ = ros::Time(0,0);
    }
  }
}
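rgbConnectCb only fires if it was registered when the RGB publisher was advertised. A sketch of that registration (topic name assumed; binding without a placeholder works because bound functors silently ignore the extra SingleSubscriberPublisher argument):

  // Typically in onInit(), given image_transport::ImageTransport it(nh):
  // re-evaluate the stream state on both subscribe and unsubscribe events.
  image_transport::SubscriberStatusCallback rgb_cb =
    boost::bind(&DriverNodelet::rgbConnectCb, this);
  pub_rgb_ = it.advertiseCamera("rgb/image_raw", 1, rgb_cb, rgb_cb);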
Example #16
  void ColorHistogram::extractMask(
    const sensor_msgs::Image::ConstPtr& image,
    const sensor_msgs::Image::ConstPtr& mask_image)
  {
    try {
      cv_bridge::CvImagePtr cv_ptr
        = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
      cv_bridge::CvImagePtr mask_ptr
        = cv_bridge::toCvCopy(mask_image, sensor_msgs::image_encodings::MONO8);
      cv::Mat bgr_image = cv_ptr->image;
      // renamed to avoid shadowing the mask_image parameter
      cv::Mat mask = mask_ptr->image;
      cv::Mat masked_image;
      bgr_image.copyTo(masked_image, mask);
      sensor_msgs::Image::Ptr ros_masked_image
        = cv_bridge::CvImage(image->header,
                             sensor_msgs::image_encodings::BGR8,
                             masked_image).toImageMsg();
      image_pub_.publish(ros_masked_image);
      
      processBGR(bgr_image, mask, image->header);
      processHSI(bgr_image, mask, image->header);
      
    }
    catch (cv_bridge::Exception& e)
    {
      NODELET_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

  }
Example #17
 void TfTransformCloud::transform(const sensor_msgs::PointCloud2ConstPtr &input)
 {
   vital_checker_->poke();
   sensor_msgs::PointCloud2 output;
   try
   {
     if (use_latest_tf_) {
       sensor_msgs::PointCloud2 latest_pointcloud(*input);
       latest_pointcloud.header.stamp = ros::Time(0);
       if (pcl_ros::transformPointCloud(target_frame_id_, latest_pointcloud, output,
                                        *tf_listener_)) {
         output.header.stamp = input->header.stamp;
         pub_cloud_.publish(output);
       }
     }
     else {
       if (pcl_ros::transformPointCloud(target_frame_id_, *input, output,
                                        *tf_listener_)) {
         pub_cloud_.publish(output);
       }
     }
   }
   catch (tf2::ConnectivityException &e)
   {
     JSK_NODELET_ERROR("Transform error: %s", e.what());
   }
   catch (tf2::InvalidArgumentException &e)
   {
     JSK_NODELET_ERROR("Transform error: %s", e.what());
   }
   catch (...)
   {
     NODELET_ERROR("Unknown transform error");
   }
 }
Example #18
void
pcl_ros::SegmentDifferences::input_target_callback (const PointCloudConstPtr &cloud, 
                                                    const PointCloudConstPtr &cloud_target)
{
  if (pub_output_.getNumSubscribers () <= 0)
    return;

  if (!isValid (cloud) || !isValid (cloud_target, "target")) 
  {
    NODELET_ERROR ("[%s::input_indices_callback] Invalid input!", getName ().c_str ());
    PointCloud output;
    output.header = cloud->header;
    pub_output_.publish (output.makeShared ());
    return;
  }

  NODELET_DEBUG ("[%s::input_indices_callback]\n"
                 "                                 - PointCloud with %d data points (%s), stamp %f, and frame %s on topic %s received.\n"
                 "                                 - PointCloud with %d data points (%s), stamp %f, and frame %s on topic %s received.",
                 getName ().c_str (),
                 cloud->width * cloud->height, pcl::getFieldsList (*cloud).c_str (), fromPCL(cloud->header).stamp.toSec (), cloud->header.frame_id.c_str (), pnh_->resolveName ("input").c_str (),
                 cloud_target->width * cloud_target->height, pcl::getFieldsList (*cloud_target).c_str (), fromPCL(cloud_target->header).stamp.toSec (), cloud_target->header.frame_id.c_str (), pnh_->resolveName ("target").c_str ());

  impl_.setInputCloud (cloud);
  impl_.setTargetCloud (cloud_target);

  PointCloud output;
  impl_.segment (output);

  pub_output_.publish (output.makeShared ());
  NODELET_DEBUG ("[%s::segmentAndPublish] Published PointCloud2 with %zu points and stamp %f on topic %s", getName ().c_str (),
                     output.points.size (), fromPCL(output.header).stamp.toSec (), pnh_->resolveName ("output").c_str ());
}
Example #19
 void MaskImageToROI::convert(
   const sensor_msgs::Image::ConstPtr& mask_msg)
 {
   vital_checker_->poke();
   boost::mutex::scoped_lock lock(mutex_);
   if (latest_camera_info_) {
     sensor_msgs::CameraInfo camera_info(*latest_camera_info_);
     std::vector<cv::Point> indices;
     cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(
       mask_msg, sensor_msgs::image_encodings::MONO8);
     cv::Mat mask = cv_ptr->image;
      for (int j = 0; j < mask.rows; j++) {
        for (int i = 0; i < mask.cols; i++) {
         if (mask.at<uchar>(j, i) == 255) {
           indices.push_back(cv::Point(i, j));
         }
       }
     }
     cv::Rect mask_rect = cv::boundingRect(indices);
     camera_info.roi.x_offset = mask_rect.x;
     camera_info.roi.y_offset = mask_rect.y;
     camera_info.roi.width = mask_rect.width;
     camera_info.roi.height = mask_rect.height;
     camera_info.header = mask_msg->header;
     pub_.publish(camera_info);
   }
   else {
     NODELET_ERROR("camera info is not yet available");
   }
 }
Example #20
void Dm32ToDmNodelet::cb(const sensor_msgs::ImageConstPtr &msg)
{
    //convert ROS image message to a CvImage.
    cv_bridge::CvImageConstPtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::TYPE_32FC1);
    }
    catch (cv_bridge::Exception& e)
    {
        NODELET_ERROR("cv_bridge input exception: %s", e.what());
        return;
    }

    cv::Mat cvframe = cv::Mat::zeros(cv_ptr->image.size(), CV_16UC1);

    for (int i = 0; i < cv_ptr->image.rows; i++)
    { //rows
        for (int j = 0; j < cv_ptr->image.cols; j++)
        { //cols
            float depth = cv_ptr->image.at<float>(i,j);
            depth = depth / scale_;
            cvframe.at<unsigned short>(i,j) = (unsigned short) depth;
        }
    }

    //convert opencv image to ros image and publish it
    cv_bridge::CvImagePtr out_msg_ptr(new cv_bridge::CvImage);
    out_msg_ptr->header = msg->header;
    out_msg_ptr->encoding = sensor_msgs::image_encodings::TYPE_16UC1;
    out_msg_ptr->image = cvframe;
    image_pub_.publish(out_msg_ptr->toImageMsg());
}
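The per-pixel loop above can also be written as a single convertTo call, which scales and saturate-casts in one pass (the semantics differ slightly for NaN input, which saturate_cast maps to 0 rather than leaving undefined):

    // Equivalent conversion: multiply by 1/scale_ and saturate-cast to 16-bit.
    cv::Mat cvframe;
    cv_ptr->image.convertTo(cvframe, CV_16UC1, 1.0 / scale_);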
Example #21
void ControlMode::controlModeCmdCallback(const control_mode_cmdConstPtr& msg)
{
  NODELET_INFO_STREAM("Heard command: " << msg->cmd);
  if (msg->cmd == "mode idle")
  {
    state = ControlModeTypes::IDLE;
    info = "";
  }
  else if (msg->cmd == "mode standby")
  {
    state = ControlModeTypes::STANDBY;
    info = "";
  }
  else if (msg->cmd == "mode active")
  {
    if (state == ControlModeTypes::STANDBY)
    {
      state = ControlModeTypes::ACTIVE;
      info = "";
    }
    else
    {
      NODELET_ERROR("Invalid transition");
    }
  }
  else
  {
    NODELET_ERROR_STREAM("Command unknown: " << msg->cmd);
  }
}
Example #22
 void updateCallback(const ros::TimerEvent& event)
 {
   // Download the most recent data from the device 
   camera_state_ = OK;
   state_info_ = "Camera operating nominally";
   try
   {
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       camera_->update();
   }
   catch(std::exception &e)
   {
       //we have disconnected from the camera, try to reinitialize
       camera_state_ = ERROR;
       state_info_ = "Unable to read from camera!";
       NODELET_ERROR("%s", state_info_.c_str());
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       camera_.reset();
       init_thread_ = boost::thread(boost::bind(&DriverNodelet::openCamera, this, _1), update_timer_);
       return;
   }
   camera_info_->header.frame_id = frame_id_;
   // Get new data and publish for the topics that have subscribers
   // Distance
   if (distance_publisher_.getNumSubscribers() > 0)
   {
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       sensor_msgs::ImagePtr distance = camera_->getDistanceImage();
       distance->header.frame_id = frame_id_;
       camera_info_->header.stamp = distance->header.stamp;
       distance_publisher_.publish(distance, camera_info_);
   }
   // Depth
   if (depth_publisher_.getNumSubscribers() > 0)
   {
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       sensor_msgs::ImagePtr depth = camera_->getDepthImage();
       depth->header.frame_id = frame_id_;
       camera_info_->header.stamp = depth->header.stamp;
       depth_publisher_.publish(depth, camera_info_);
   }
   // Amplitude
   if (amplitude_publisher_.getNumSubscribers() > 0)
   {
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       sensor_msgs::ImagePtr amplitude = camera_->getAmplitudeImage();
       amplitude->header.frame_id = frame_id_;
       camera_info_->header.stamp = amplitude->header.stamp;
       amplitude_publisher_.publish(amplitude, camera_info_);
   }
   // Points
   if (points_publisher_.getNumSubscribers() > 0)
   {
       boost::lock_guard<boost::recursive_mutex> lock(config_mutex_);
       sensor_msgs::PointCloud2Ptr points = camera_->getPointCloud();
       points->header.frame_id = frame_id_;
       points_publisher_.publish(points);
   }
   updater.update();
 }
Example #23
void Dm32ToDmNodelet::onInit()
{
    nh_ = getNodeHandle();
    private_nh_ = getPrivateNodeHandle();

    nh_.param("sensor/name", sensor_name_, std::string("realsenseR200"));

    image_transport::ImageTransport it(nh_);
    image_pub_ = it.advertise("dm32_to_dm/output", 1);
    sub_ = it.subscribe("dm32_to_dm/input", 1, &Dm32ToDmNodelet::cb, this);

    scale_ = 0.001;

    if(sensor_name_ == "realsenseF200")
    {
        sensor_depth_max_ = 1200;
    }
    else if(sensor_name_ == "realsenseR200")
    {
        private_nh_.param("sensor_depth_max", sensor_depth_max_, int(4000));
    }
    else if(sensor_name_ == "picoflexx")
    {
        nh_.param("sensor/depth/max", sensor_depth_max_, int(5000));
    }
    else {
        NODELET_ERROR("Wrong sensor name!");
        return;
    }

    NODELET_INFO_STREAM("Initialized dm32 to dm nodelet for sensor " << sensor_name_
                        << ", with sensor_depth_max=" << sensor_depth_max_);
}
Example #24
void
pcl_ros::BAGReader::onInit ()
{
  boost::shared_ptr<ros::NodeHandle> pnh_;
  pnh_.reset (new ros::NodeHandle (getMTPrivateNodeHandle ()));
  // ---[ Mandatory parameters
  if (!pnh_->getParam ("file_name", file_name_))
  {
    NODELET_ERROR ("[onInit] Need a 'file_name' parameter to be set before continuing!");
    return;
  }
   if (!pnh_->getParam ("topic_name", topic_name_))
  {
    NODELET_ERROR ("[onInit] Need a 'topic_name' parameter to be set before continuing!"); 
    return;
  }
  // ---[ Optional parameters
  int max_queue_size = 1;
  pnh_->getParam ("publish_rate", publish_rate_);
  pnh_->getParam ("max_queue_size", max_queue_size);

  ros::Publisher pub_output = pnh_->advertise<sensor_msgs::PointCloud2> ("output", max_queue_size);

  NODELET_DEBUG ("[onInit] Nodelet successfully created with the following parameters:\n"
                 " - file_name    : %s\n"
                 " - topic_name   : %s",
                 file_name_.c_str (), topic_name_.c_str ());

  if (!open (file_name_, topic_name_))
    return;
  PointCloud output;
  output_ = boost::make_shared<PointCloud> (output);
  output_->header.stamp    = ros::Time::now ();

  // Continuous publishing enabled?
  while (pnh_->ok ())
  {
    PointCloudConstPtr cloud = getNextCloud ();
    NODELET_DEBUG ("Publishing data (%d points) on topic %s in frame %s.", output_->width * output_->height, pnh_->resolveName ("output").c_str (), output_->header.frame_id.c_str ());
    output_->header.stamp = ros::Time::now ();

    pub_output.publish (output_);

    ros::Duration (publish_rate_).sleep ();
    ros::spinOnce ();
  }
}
Example #25
        virtual void onInit() {
            // Get the NodeHandle
            private_nh = getPrivateNodeHandle();

            if (private_nh.getParam("depthlookup_image_topic", depth_topic))
            {
                //NODELET_INFO(">>> Input depth image topic: " << depth_topic);
            } else {
                NODELET_ERROR("!Failed to get depth image topic parameter!");
                exit(EXIT_FAILURE);
            }
            if (private_nh.getParam("depthlookup_info_topic", depth_info))
            {
                //NODELET_INFO(">>> Input depth camera info topic: " << depth_info);
            } else {
                NODELET_ERROR("!Failed to get depth camera info topic parameter!");
                exit(EXIT_FAILURE);
            }

            if (private_nh.getParam("depthlookup_out_topic", out_topic))
            {
                //NODELET_INFO(">>> Output Topic: " << out_topic);
            } else {
                NODELET_ERROR("!Failed to get output topic parameter!");
                exit(EXIT_FAILURE);
            }

            if (private_nh.getParam("depthlookup_in_topic", in_topic))
            {
                //NODELET_INFO(">>> Input Topic: " << in_topic);
            } else {
                NODELET_ERROR("!Failed to get input topic parameter!");
                exit(EXIT_FAILURE);
            }

            // The subscribers and synchronizer must outlive onInit(): as
            // locals they would be destroyed on return and depth_callback
            // would never fire, so they are stored in members instead.
            image_sub.reset(new message_filters::Subscriber<Image>(private_nh, depth_topic, 5));
            info_sub.reset(new message_filters::Subscriber<CameraInfo>(private_nh, depth_info, 5));
            people_pub = private_nh.advertise<clf_perception_vision_msgs::ExtendedPeople>(out_topic, 1);

            sync.reset(new TimeSynchronizer<Image, CameraInfo>(*image_sub, *info_sub, 5));
            sync->registerCallback(boost::bind(&DepthLookup::depth_callback, this, _1, _2));

            ROS_INFO("Image Sink Nodelet 'onInit()' done.");
        }
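For the member-based wiring above to compile, the class needs declarations along these lines (assumed; the original snippet does not show them):

        // Assumed members of the image sink nodelet:
        boost::shared_ptr<message_filters::Subscriber<Image> > image_sub;
        boost::shared_ptr<message_filters::Subscriber<CameraInfo> > info_sub;
        boost::shared_ptr<TimeSynchronizer<Image, CameraInfo> > sync;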
Example #26
void ServoInterface::loadServoParams()
{
  //read in servo settings
  ros::NodeHandle nhPvt = getPrivateNodeHandle();
  XmlRpc::XmlRpcValue v;
  nhPvt.param("servos", v, v);
  if(v.getType() == XmlRpc::XmlRpcValue::TypeStruct)
  {
    XmlRpc::XmlRpcValue servoInfo;
    ServoSettings toAdd;
    for(int i = 0; i < v.size(); i++)
    {
      servoInfo = v[boost::lexical_cast<std::string>(i)];

      if(servoInfo.getType() == XmlRpc::XmlRpcValue::TypeStruct &&
         servoInfo.size() == 5)
      {
        toAdd.center = static_cast<int>(servoInfo["center"]);
        toAdd.min = static_cast<int>(servoInfo["min"]);
        toAdd.max = static_cast<int>(servoInfo["max"]);
        toAdd.range = toAdd.max-toAdd.min;
        toAdd.port = i;
        toAdd.reverse = static_cast<bool>(servoInfo["reverse"]);
        m_servoSettings[servoInfo["name"]] = toAdd;
      } else
      {
        NODELET_ERROR("ServoInterface: XmlRpc servo settings formatted incorrectly");
      }
    }
  } else
  {
    NODELET_ERROR("ServoInterface: Couldn't retreive servo settings");
  }
  NODELET_INFO("ServoInterface: Loaded %lu servos", m_servoSettings.size());

  if(m_servoSettings.find("frontBrake") != m_servoSettings.end())
  {
    m_brakeSetup.independentFront = true;
  }
  if(m_servoSettings.find("backBrake") != m_servoSettings.end())
  {
    m_brakeSetup.independentBack = true;
  }
}
Example #27
 void NormalDirectionFilter::filter(
   const sensor_msgs::PointCloud2::ConstPtr& msg,
   const sensor_msgs::Imu::ConstPtr& imu_msg)
 {
   boost::mutex::scoped_lock lock(mutex_);
   vital_checker_->poke();
   pcl::PointCloud<pcl::Normal>::Ptr normal(new pcl::PointCloud<pcl::Normal>);
   pcl::fromROSMsg(*msg, *normal);
   geometry_msgs::Vector3Stamped stamped_imu, transformed_imu;
   stamped_imu.header = imu_msg->header;
   stamped_imu.vector = imu_msg->linear_acceleration;
   try
   {
     tf_listener_->waitForTransform(msg->header.frame_id, imu_msg->header.frame_id, imu_msg->header.stamp,
                                    ros::Duration(0.1));
     tf_listener_->transformVector(
       msg->header.frame_id, stamped_imu, transformed_imu);
     Eigen::Vector3d imu_vectord;
     Eigen::Vector3f imu_vector;
     tf::vectorMsgToEigen(transformed_imu.vector, imu_vectord);
     jsk_recognition_utils::pointFromVectorToVector<Eigen::Vector3d, Eigen::Vector3f>(
       imu_vectord, imu_vector);
     imu_vector.normalize();
     pcl::PointIndices indices;
     filterIndices(normal, imu_vector, indices);
     pcl_msgs::PointIndices ros_indices;
     pcl_conversions::fromPCL(indices, ros_indices);
     ros_indices.header = msg->header;
     pub_.publish(ros_indices);
   }
   catch (tf2::ConnectivityException &e)
   {
     NODELET_ERROR("[%s] Transform error: %s", __PRETTY_FUNCTION__, e.what());
   }
   catch (tf2::InvalidArgumentException &e)
   {
     NODELET_ERROR("[%s] Transform error: %s", __PRETTY_FUNCTION__, e.what());
   }
   catch (tf2::ExtrapolationException &e)
   {
     NODELET_ERROR("[%s] Transform error: %s", __PRETTY_FUNCTION__, e.what());
   }
 }
Example #28
  void LineSegmentCollector::onInit()
  {
    DiagnosticNodelet::onInit();
    srv_ = boost::make_shared <dynamic_reconfigure::Server<Config> > (*pnh_);
    dynamic_reconfigure::Server<Config>::CallbackType f =
      boost::bind (&LineSegmentCollector::configCallback, this, _1, _2);
    srv_->setCallback (f);
    
    if (!pnh_->getParam("fixed_frame_id", fixed_frame_id_)) {
      NODELET_ERROR("no ~fixed_frame_id is specified");
      return;
    }

    std::string rotate_type_str;
    pnh_->param("rotate_type", rotate_type_str, std::string("tilt_two_way"));
    if (rotate_type_str == "tilt") {
      rotate_type_ = ROTATION_TILT;
    }
    else if (rotate_type_str == "tilt_two_way") {
      rotate_type_ = ROTATION_TILT_TWO_WAY;
    }
    else if (rotate_type_str == "spindle") {
      rotate_type_ = ROTATION_SPINDLE;
    }
    else {
      NODELET_ERROR("unknown ~rotate_type: %s", rotate_type_str.c_str());
      return;
    }
    
    pub_point_cloud_
      = advertise<sensor_msgs::PointCloud2>(*pnh_, "output/cloud", 1);
    pub_inliers_ = advertise<jsk_recognition_msgs::ClusterPointIndices>(*pnh_, "output/inliers", 1);
    pub_coefficients_
      = advertise<jsk_recognition_msgs::ModelCoefficientsArray>(*pnh_, "output/coefficients", 1);
    pub_polygons_
      = advertise<jsk_recognition_msgs::PolygonArray>(*pnh_, "output/polygons", 1);
    debug_pub_inliers_before_plane_
      = advertise<jsk_recognition_msgs::ClusterPointIndices>(
        *pnh_, "debug/connect_segments/inliers", 1);

    onInitPostProcess();
  }
Example #29
OutputMode DriverNodelet::mapConfigMode2OutputMode (int mode) const
{
  std::map<int, OutputMode>::const_iterator it = config2mode_map_.find (mode);
  if (it == config2mode_map_.end ())
  {
    NODELET_ERROR ("mode %d could not be found", mode);
    exit (-1);
  }
  else
    return it->second;
}
Example #30
  /*!
  * \brief Function that allows reconfiguration of the camera.
  *
  * This function serves as a callback for the dynamic reconfigure service. It simply passes the configuration object to the driver so the camera can reconfigure itself.
  * \param config pointgrey_camera_driver::PointGreyConfig object passed by reference. Values will be changed to those the driver is currently using.
  * \param level driver_base reconfiguration level. See driver_base/SensorLevels.h for more information.
  */
  void paramCallback(pointgrey_camera_driver::PointGreyConfig &config, uint32_t level)
  {
    try
    {
      NODELET_DEBUG("Dynamic reconfigure callback with level: %d", level);
      pg_.setNewConfiguration(config, level);

      // Store needed parameters for the metadata message
      gain_ = config.gain;
      wb_blue_ = config.white_balance_blue;
      wb_red_ = config.white_balance_red;

      // Store CameraInfo binning information
      if(config.video_mode == "640x480_mono8" || config.video_mode == "format7_mode1")
      {
        binning_x_ = 2;
        binning_y_ = 2;
      }
      else if(config.video_mode == "format7_mode2")
      {
        binning_x_ = 0;
        binning_y_ = 2;
      }
      else
      {
        binning_x_ = 0;
        binning_y_ = 0;
      }

      // Store CameraInfo RegionOfInterest information
      if(config.video_mode == "format7_mode0" || config.video_mode == "format7_mode1" || config.video_mode == "format7_mode2")
      {
        roi_x_offset_ = config.format7_x_offset;
        roi_y_offset_ = config.format7_y_offset;
        roi_width_ = config.format7_roi_width;
        roi_height_ = config.format7_roi_height;
        do_rectify_ = true; // Set to true if an ROI is used.
      }
      else
      {
        // Zeros mean the full resolution was captured.
        roi_x_offset_ = 0;
        roi_y_offset_ = 0;
        roi_height_ = 0;
        roi_width_ = 0;
        do_rectify_ = false; // Set to false if the whole image is captured.
      }
    }
    catch(std::runtime_error& e)
    {
      NODELET_ERROR("Reconfigure Callback failed with error: %s", e.what());
    }
  }
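As in the earlier examples, a callback like this is wired up through a dynamic_reconfigure server; a sketch of the registration (the server member and nodelet class name are assumptions):

    // Typically in onInit(); srv_ is an assumed boost::shared_ptr to a
    // dynamic_reconfigure::Server<pointgrey_camera_driver::PointGreyConfig>.
    dynamic_reconfigure::Server<pointgrey_camera_driver::PointGreyConfig>::CallbackType f =
      boost::bind(&PointGreyCameraNodelet::paramCallback, this, _1, _2);
    srv_->setCallback(f);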