// NOTE: Not used in SPENCER! We use the version with ground plane below!
void imageCallback(const Image::ConstPtr &msg)
{
    //    ROS_INFO("Entered img callback");
    std::vector<cudaHOG::Detection> detHog;

    //  unsigned char image
    QImage image_rgb(&msg->data[0], msg->width, msg->height, QImage::Format_RGB888);
    int returnPrepare = hog->prepare_image(image_rgb.convertToFormat(QImage::Format_ARGB32).bits(),
                                           (short unsigned int) msg->width, (short unsigned int) msg->height);

    if(returnPrepare)
    {
        ROS_ERROR("groundHOG: Error while preparing the image");
        return;
    }

    hog->test_image(detHog);
    hog->release_image();

    // cudaHOG's detection window is 64x128 px with a 16 px margin around the
    // person on each side; the margin is stripped off again below
    int w = 64, h = 128;

    GroundHOGDetections detections;

    detections.header = msg->header;
    for(unsigned int i=0;i<detHog.size();i++)
    {
        float score = detHog[i].score;
        float scale = detHog[i].scale;

        float width = (w - 32.0f)*scale;
        float height = (h - 32.0f)*scale;
        float x = (detHog[i].x + 16.0f*scale);
        float y = (detHog[i].y + 16.0f*scale);

        detections.scale.push_back(scale);
        detections.score.push_back(score);
        detections.pos_x.push_back(x);
        detections.pos_y.push_back(y);
        detections.width.push_back(width);
        detections.height.push_back(height);
    }

    if(pub_result_image.getNumSubscribers()) {
        ROS_DEBUG("Publishing image");
        render_bbox_2D(detections, image_rgb, 255, 0, 0, 2);

        Image sensor_image;
        sensor_image.header = msg->header;
        sensor_image.height = image_rgb.height();
        sensor_image.width  = image_rgb.width();
        sensor_image.step   = msg->step;
        vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
        sensor_image.data = image_bits;
        sensor_image.encoding = msg->encoding;

        pub_result_image.publish(sensor_image);
    }

    pub_message.publish(detections);
}
	inline void publish(Mat img, imgColorType t, ros::Time now = ros::Time::now())
	{
		hasListener=(img_pub.getNumSubscribers() > 0);
		if (!enable || !hasListener)
			return;
		Mat res;
		switch (t)
		{
		case gray:
			msg.encoding = sensor_msgs::image_encodings::MONO8;
			res = img;
			break;
		case hsv:
			msg.encoding = sensor_msgs::image_encodings::BGR8;
			cvtColor(img, res, CV_HSV2BGR);
			break;
		case bgr:
		default:
			msg.encoding = sensor_msgs::image_encodings::BGR8;
			res = img;
			break;
		}
		msg.header.stamp = now;
		msg.image = res;
		img_pub.publish(msg.toImageMsg());
	}
	inline bool thereAreListeners(bool loadNow=true) // subscriber count drops to zero e.g. when rqt is closed
	{
		if(loadNow)
		{
			hasListener=(img_pub.getNumSubscribers() > 0);
		}
		return hasListener;
	}
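// The two helpers above implement the usual lazy-publish pattern: skip all
// conversion work unless someone is actually subscribed. A minimal standalone
// sketch of the same pattern (node and topic names here are assumptions):
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <opencv2/core/core.hpp>

int main(int argc, char** argv)
{
	ros::init(argc, argv, "lazy_publisher_sketch");
	ros::NodeHandle nh;
	image_transport::ImageTransport it(nh);
	image_transport::Publisher img_pub = it.advertise("debug_image", 1);

	ros::Rate rate(10);
	while (ros::ok())
	{
		if (img_pub.getNumSubscribers() > 0) // skip all work when nobody listens
		{
			cv_bridge::CvImage msg;
			msg.encoding = sensor_msgs::image_encodings::BGR8;
			msg.header.stamp = ros::Time::now();
			msg.image = cv::Mat::zeros(480, 640, CV_8UC3); // placeholder image
			img_pub.publish(msg.toImageMsg());
		}
		rate.sleep();
	}
	return 0;
}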
// Handles (un)subscribing when clients (un)subscribe
void ConvertMetricNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_depth_.getNumSubscribers() == 0)
  {
    sub_raw_.shutdown();
  }
  else if (!sub_raw_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_raw_ = it_->subscribe("image_raw", 1, &ConvertMetricNodelet::depthCb, this, hints);
  }
}
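// A sketch of how such a connect callback is typically registered in the
// nodelet's onInit(), so that connectCb() fires on every (un)subscribe event
// of the output topic (member names follow the usual depth_image_proc layout
// and are assumptions here):
void ConvertMetricNodelet::onInit()
{
  ros::NodeHandle& nh = getNodeHandle();
  it_.reset(new image_transport::ImageTransport(nh));

  image_transport::SubscriberStatusCallback connect_cb =
      boost::bind(&ConvertMetricNodelet::connectCb, this);
  // Hold the mutex so connectCb() cannot run between advertising and
  // the assignment to pub_depth_
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  pub_depth_ = it_->advertise("image", 1, connect_cb, connect_cb);
}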
Example #5
void imgReceivedCallback(const sensor_msgs::ImageConstPtr & msg)
{
	if(rosPublisher.getNumSubscribers() && msg->data.size())
	{
		cv_bridge::CvImageConstPtr ptr = cv_bridge::toCvShare(msg);
		if(ptr->image.depth() == CV_8U && ptr->image.channels() == 3)
		{
			cv::Mat motion = ptr->image.clone();
			if(previousImage.cols == motion.cols && previousImage.rows == motion.rows)
			{
				unsigned char * imageData = (unsigned char *)motion.data;
				unsigned char * previous_imageData = (unsigned char *)previousImage.data;
				int widthStep = motion.cols * motion.elemSize();
				for(int j=0; j<motion.rows; ++j)
				{
					for(int i=0; i<motion.cols; ++i)
					{
						float b = (float)imageData[j*widthStep+i*3+0];
						float g = (float)imageData[j*widthStep+i*3+1];
						float r = (float)imageData[j*widthStep+i*3+2];
						float previous_b = (float)previous_imageData[j*widthStep+i*3+0];
						float previous_g = (float)previous_imageData[j*widthStep+i*3+1];
						float previous_r = (float)previous_imageData[j*widthStep+i*3+2];

						// zero the pixel if no channel changed by at least 'ratio' w.r.t. the previous frame
						if(!(fabs(b-previous_b)/256.0f>=ratio || fabs(g-previous_g)/256.0f >= ratio || fabs(r-previous_r)/256.0f >= ratio))
						{
							imageData[j*widthStep+i*3+0] = 0;
							imageData[j*widthStep+i*3+1] = 0;
							imageData[j*widthStep+i*3+2] = 0;
						}
					}
				}
			}
			previousImage = ptr->image.clone();

			cv_bridge::CvImage img;
			img.header = ptr->header;
			img.encoding = ptr->encoding;
			img.image = motion;
			rosPublisher.publish(img.toImageMsg());
		}
		else
		{
			ROS_WARN("Image format should be 8bits - 3 channels");
		}
	}
}
// Connection callback that unsubscribes from the tracker if no one is subscribed.
void connectCallback(message_filters::Subscriber<CameraInfo> &sub_cam,
                     message_filters::Subscriber<GroundPlane> &sub_gp,
                     image_transport::SubscriberFilter &sub_col,
                     image_transport::SubscriberFilter &sub_dep,
                     image_transport::ImageTransport &it) {
    if(!pub_message.getNumSubscribers() && !pub_result_image.getNumSubscribers() && !pub_centres.getNumSubscribers() && !pub_detected_persons.getNumSubscribers()) {
        ROS_DEBUG("Upper Body Detector: No subscribers. Unsubscribing.");
        sub_cam.unsubscribe();
        sub_gp.unsubscribe();
        sub_col.unsubscribe();
        sub_dep.unsubscribe();
    } else {
        ROS_DEBUG("Upper Body Detector: New subscribers. Subscribing.");
        sub_cam.subscribe();
        sub_gp.subscribe();
        sub_col.subscribe(it,sub_col.getTopic(),1);
        sub_dep.subscribe(it,sub_dep.getTopic(),1);
    }
}
// Connection callback that unsubscribes from the tracker if no one is subscribed.
void connectCallback(ros::Subscriber &sub_msg,
                     ros::NodeHandle &n,
                     string gp_topic,
                     string img_topic,
                     Subscriber<GroundPlane> &sub_gp,
                     Subscriber<CameraInfo> &sub_cam,
                     image_transport::SubscriberFilter &sub_col,
                     image_transport::ImageTransport &it){
    if(!pub_message.getNumSubscribers() && !pub_result_image.getNumSubscribers() && !pub_detected_persons.getNumSubscribers()) {
        ROS_DEBUG("HOG: No subscribers. Unsubscribing.");
        sub_msg.shutdown();
        sub_gp.unsubscribe();
        sub_cam.unsubscribe();
        sub_col.unsubscribe();
    } else {
        ROS_DEBUG("HOG: New subscribers. Subscribing.");
        if(gp_topic.empty()) {
            sub_msg = n.subscribe(img_topic, 1, &imageCallback);
        }
        sub_cam.subscribe();
        sub_gp.subscribe();
        sub_col.subscribe(it,sub_col.getTopic(),1);
    }
}
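// A sketch of how a free-function callback like the one above is typically
// bound when advertising, so that both connect and disconnect events funnel
// into it (variable and topic names are assumptions):
ros::SubscriberStatusCallback con_cb = boost::bind(&connectCallback,
        boost::ref(sub_message), boost::ref(n), gp_topic, image_topic,
        boost::ref(subscriber_gp), boost::ref(subscriber_camera_info),
        boost::ref(subscriber_color), boost::ref(it));
pub_message = n.advertise<GroundHOGDetections>(pub_topic, 10, con_cb, con_cb);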
void disparityCb(const stereo_msgs::DisparityImageConstPtr& msg)
{
    if(cam_pub.getNumSubscribers() > 0 ||
       mask_pub.getNumSubscribers() > 0)
    {
        //    double ticksBefore = cv::getTickCount();
        // Check for common errors in input
        if (msg->min_disparity == 0.0 && msg->max_disparity == 0.0)
        {
            ROS_ERROR("Disparity image fields min_disparity and max_disparity are not set");
            return;
        }
        if (msg->image.encoding != sensor_msgs::image_encodings::TYPE_32FC1)
        {
            ROS_ERROR("Disparity image must be 32-bit floating point (encoding '32FC1'), but has encoding '%s'",
                      msg->image.encoding.c_str());
            return;
        }

        // adapted from image_view / disparity_view:
        // scale the disparity range to [0, 255], then threshold it
        float min_disparity = msg->min_disparity;
        float max_disparity = msg->max_disparity;
        float multiplier = 255.0f / (max_disparity - min_disparity);

        const cv::Mat_<float> dmat(msg->image.height, msg->image.width,
                                   (float*)&msg->image.data[0], msg->image.step);
        cv::Mat disparity_greyscale;
        disparity_greyscale.create(msg->image.height, msg->image.width, CV_8UC1);

        for (int row = 0; row < disparity_greyscale.rows; ++row) {
            const float* d = dmat[row];
            for (int col = 0; col < disparity_greyscale.cols; ++col) {
                int index = (d[col] - min_disparity) * multiplier + 0.5;

                //index = std::min(255, std::max(0, index));
                // pushing it into the interval does not matter because of the threshold afterwards
                if(index >= threshold)
                    disparity_greyscale.at<uchar>(row, col) = 255;
                else
                    disparity_greyscale.at<uchar>(row, col) = 0;
            }
        }

        cv::Mat tmp1, mask;
        cv::erode(disparity_greyscale, tmp1,
                  cv::Mat::ones(erode_size, erode_size, CV_8UC1),
                  cv::Point(-1, -1), erode_iterations);
        cv::dilate(tmp1, mask,
                   cv::Mat::ones(dilate_size, dilate_size, CV_8UC1),
                   cv::Point(-1, -1), dilate_iterations);

        if(mask_pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage mask_msg;
            mask_msg.header = msg->header;
            mask_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
            mask_msg.image = mask;
            mask_pub.publish(mask_msg.toImageMsg());
        }

        if(!image_rect.empty() && cam_pub.getNumSubscribers() > 0)
        {
            cv::Mat masked;
            image_rect.copyTo(masked, mask);
            cv_bridge::CvImage masked_msg;
            masked_msg.header = msg->header;
            masked_msg.encoding = sensor_msgs::image_encodings::BGR8;

            // rescale the masked image if requested
            if(scale_factor > 1)
            {
                cv::Mat masked_tmp = masked;
                cv::resize(masked_tmp, masked,
                           cv::Size(masked.cols*scale_factor, masked.rows*scale_factor));
            }
            masked_msg.image = masked;
            // to provide a synchronized output we publish both topics with the same timestamp
            ros::Time currentTime    = ros::Time::now();
            masked_msg.header.stamp  = currentTime;
            camera_info.header.stamp = currentTime;
            cam_pub.publish(*masked_msg.toImageMsg(), camera_info);
        }

        //    ROS_INFO("disparityCb runtime: %f ms",
        //             1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
}
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;

    if(cam_info_received)
    {
      ros::Time curr_stamp(ros::Time::now());
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect

        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[0].Tvec.at<float>(0,0), markers[0].Tvec.at<float>(0,1), markers[0].Tvec.at<float>(0,2));

        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publish pose messages for the selected marker IDs
          //if(markers[i].id == marker_id)
          if(markers[i].id == 26 || markers[i].id == 35 || markers[i].id == 58 ||
             markers[i].id == 163 || markers[i].id == 640 || markers[i].id == 512 ||
             markers[i].id == 43 || markers[i].id == 291 || markers[i].id == 355)
          {
            // by Weiwei
            //ROS_INFO("x: %0.3f y: %0.3f z: %0.3f", markers[i].Tvec.at<float>(0,0), markers[i].Tvec.at<float>(0,1), markers[i].Tvec.at<float>(0,2));
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);

            tf::StampedTransform cameraToReference;
            cameraToReference.setIdentity();

            if ( reference_frame != camera_frame )
            {
              getTransform(reference_frame,
                           camera_frame,
                           cameraToReference);
            }

            transform = 
              static_cast<tf::Transform>(cameraToReference) 
              * static_cast<tf::Transform>(rightToLeft) 
              * transform;

            tf::StampedTransform stampedTransform(transform, curr_stamp,
                                                  reference_frame, marker_frame);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = reference_frame;
            poseMsg.header.stamp = curr_stamp;

            double aruco_roll_, aruco_pitch_, aruco_yaw_;
            tf::Quaternion aruco_quat_;
            tf::quaternionMsgToTF(poseMsg.pose.orientation, aruco_quat_);
            tf::Matrix3x3(aruco_quat_).getRPY(aruco_roll_, aruco_pitch_, aruco_yaw_);

            //ROS_INFO("April Tag RPY: [%0.3f, %0.3f, %0.3f]", aruco_roll_*180/3.1415926, aruco_pitch_*180/3.1415926, aruco_yaw_*180/3.1415926);
//-------------------------unified coordinate systems of pose----------------------------
            aruco_yaw_ = aruco_yaw_*180/3.141592;
            printf("Original [x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n", poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

            if (markers[i].id == 26 || markers[i].id == 58)
            {
              float PI = 3.1415926;
              float ang = PI*3/4;
              float x_0 = -0.045;//-0.015;
              float y_0 = -0.015;//0.045;

              poseMsg.pose.position.x = x_0 + poseMsg.pose.position.x;// + poseMsg.pose.position.x * cos(-ang) - poseMsg.pose.position.y * sin(-ang); 
              poseMsg.pose.position.y = y_0 + poseMsg.pose.position.y;// + poseMsg.pose.position.x * sin(-ang) + poseMsg.pose.position.y * cos(-ang);

              //printf("[x, y] = [%0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y);

              aruco_yaw_ = aruco_yaw_ + ang*180/PI; // still in degrees
              printf("[x, y, yaw] = [%0.3f, %0.3f, %0.3f]\n",poseMsg.pose.position.x, poseMsg.pose.position.y, aruco_yaw_);

              //printf("-----------unify the coordinate system ---------------------\n"); 
            }

            //printf("------------------------------------------------------------------\n-----------------aruco_yaw_ = %0.3f\n", aruco_yaw_);
            //printf("------------------------------------------------------------------\n");

            double temp_x = poseMsg.pose.position.x;
            double temp_y = poseMsg.pose.position.y;

            poseMsg.pose.position.x = -temp_y;
            poseMsg.pose.position.y = -temp_x;

            // createQuaternionFromRPY expects radians; aruco_yaw_ was converted to degrees above
            tf::Quaternion quat = tf::createQuaternionFromRPY(aruco_roll_, aruco_pitch_, aruco_yaw_*3.141592/180.0);

            poseMsg.pose.orientation.x = quat.x();
            poseMsg.pose.orientation.y = quat.y();
            poseMsg.pose.orientation.z = quat.z();
            poseMsg.pose.orientation.w = quat.w();   

            pose_pub.publish(poseMsg);


            geometry_msgs::TransformStamped transformMsg;
            tf::transformStampedTFToMsg(stampedTransform, transformMsg);
            transform_pub.publish(transformMsg);

            geometry_msgs::Vector3Stamped positionMsg;
            positionMsg.header = transformMsg.header;
            positionMsg.vector = transformMsg.transform.translation;
            position_pub.publish(positionMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,cv::Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
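  // getTransform() is not shown in this snippet. A minimal sketch consistent
  // with its use above, assuming a tf::TransformListener member _tfListener:
  bool getTransform(const std::string& refFrame, const std::string& childFrame,
                    tf::StampedTransform& transform)
  {
    std::string errMsg;
    if (!_tfListener.waitForTransform(refFrame, childFrame, ros::Time(0),
                                      ros::Duration(0.5), ros::Duration(0.01), &errMsg))
    {
      ROS_ERROR_STREAM("Unable to get pose from TF: " << errMsg);
      return false;
    }
    try
    {
      _tfListener.lookupTransform(refFrame, childFrame, ros::Time(0), transform);
    }
    catch (const tf::TransformException& e)
    {
      ROS_ERROR_STREAM("Error in lookupTransform of " << childFrame << " in " << refFrame << ": " << e.what());
      return false;
    }
    return true;
  }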
void callback(const ImageConstPtr &depth,  const ImageConstPtr &color,const GroundPlane::ConstPtr &gp, const CameraInfoConstPtr &info)
{
    // Check if calculation is necessary
    bool detect = pub_message.getNumSubscribers() > 0 || pub_centres.getNumSubscribers() > 0 || pub_detected_persons.getNumSubscribers() > 0;
    bool vis = pub_result_image.getNumSubscribers() > 0;

    if(!detect && !vis)
        return;

    // Get depth image as matrix
    cv_depth_ptr = cv_bridge::toCvCopy(depth);
    img_depth_ = cv_depth_ptr->image;
    Matrix<double> matrix_depth(info->width, info->height);
    for (unsigned int r = 0; r < info->height; r++){
        for (unsigned int c = 0; c < info->width; c++) {
            matrix_depth(c, r) = img_depth_.at<float>(r,c);
        }
    }

    // Generate base camera
    Matrix<double> R = Eye<double>(3);
    Vector<double> t(3, 0.0);
    Matrix<double> K(3,3, (double*)&info->K[0]);

    // Get GP
    Vector<double> GP(3, (double*) &gp->n[0]);
    GP.pushBack((double) gp->d);

    // Detect upper bodies
    Camera camera(K,R,t,GP);
    PointCloud point_cloud(camera, matrix_depth);
    Vector<Vector< double > > detected_bounding_boxes;
    detector->ProcessFrame(camera, matrix_depth, point_cloud, upper_body_template, detected_bounding_boxes);

    // Generate messages
    UpperBodyDetector detection_msg;
    geometry_msgs::PoseArray bb_centres;
    spencer_tracking_msgs::DetectedPersons detected_persons;

    detection_msg.header = depth->header;
    bb_centres.header    = depth->header;
    detected_persons.header = depth->header;

    for(int i = 0; i < detected_bounding_boxes.getSize(); i++)
    {
        // Custom detections message
        detection_msg.pos_x.push_back(detected_bounding_boxes(i)(0));
        detection_msg.pos_y.push_back(detected_bounding_boxes(i)(1));
        detection_msg.width.push_back(detected_bounding_boxes(i)(2));
        detection_msg.height.push_back(detected_bounding_boxes(i)(3));
        detection_msg.dist.push_back(detected_bounding_boxes(i)(4));
        detection_msg.median_depth.push_back(detected_bounding_boxes(i)(5));

        // Calculate centres of bounding boxes
        double mid_point_x = detected_bounding_boxes(i)(0)+detected_bounding_boxes(i)(2)/2.0;
        double mid_point_y = detected_bounding_boxes(i)(1)+detected_bounding_boxes(i)(3)/2.0;

        // PoseArray message for boundingbox centres
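        // back-project with the pinhole model: X = Z*(u-cx)/fx, Y = Z*(v-cy)/fy,
        // where Z is the median depth of the box (this Matrix class is apparently
        // indexed (col,row), so K(2,0) is cx and K(2,1) is cy)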
        geometry_msgs::Pose pose;
        pose.position.x = detected_bounding_boxes(i)(5)*((mid_point_x-K(2,0))/K(0,0));
        pose.position.y = detected_bounding_boxes(i)(5)*((mid_point_y-K(2,1))/K(1,1));
        pose.position.z = detected_bounding_boxes(i)(5);
        pose.orientation.w = 1.0; //No rotation atm.
        bb_centres.poses.push_back(pose);

        // DetectedPerson for SPENCER
        spencer_tracking_msgs::DetectedPerson detected_person;
        detected_person.modality = spencer_tracking_msgs::DetectedPerson::MODALITY_GENERIC_RGBD;
        detected_person.confidence = detected_bounding_boxes(i)(4); // FIXME: normalize
        detected_person.pose.pose = pose;

        // 6x6 row-major covariance: diagonal entries (0,0)..(2,2) are position,
        // (3,3)..(5,5) rotation, which is unobserved here, hence LARGE_VARIANCE
        const double LARGE_VARIANCE = 999999999;
        detected_person.pose.covariance[0*6 + 0] = pose_variance;
        detected_person.pose.covariance[1*6 + 1] = pose_variance; // up axis (since this is in sensor frame!)
        detected_person.pose.covariance[2*6 + 2] = pose_variance;
        detected_person.pose.covariance[3*6 + 3] = LARGE_VARIANCE;
        detected_person.pose.covariance[4*6 + 4] = LARGE_VARIANCE;
        detected_person.pose.covariance[5*6 + 5] = LARGE_VARIANCE;

        detected_person.detection_id = current_detection_id;
        current_detection_id += detection_id_increment;

        detected_persons.detections.push_back(detected_person);  
    }

    // Creating a ROS image with the detection results and publishing it
    if(vis) {
        ROS_DEBUG("Publishing image");
        QImage image_rgb(&color->data[0], color->width, color->height, QImage::Format_RGB888); // would opencv be better?
        render_bbox_2D(detection_msg, image_rgb, 0, 0, 255, 2);

        sensor_msgs::Image sensor_image;
        sensor_image.header = color->header;
        sensor_image.height = image_rgb.height();
        sensor_image.width  = image_rgb.width();
        sensor_image.step   = color->step;
        vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
        sensor_image.data = image_bits;
        sensor_image.encoding = color->encoding;

        pub_result_image.publish(sensor_image);
    }

    // Publishing detections
    pub_message.publish(detection_msg);
    pub_centres.publish(bb_centres);
    pub_detected_persons.publish(detected_persons);
}
Example #11
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
  double ticksBefore = cv::getTickCount();
  static tf::TransformBroadcaster br;
  if(cam_info_received)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
      inImage = cv_ptr->image;

      if(normalizeImageIllumination)
      {
        cv::Mat inImageNorm;
        pal_vision_util::dctNormalization(inImage, inImageNorm, dctComponentsToRemove);
        inImage = inImageNorm;
      }

      //detection results will go into "markers"
      markers.clear();
      //Ok, let's detect
      mDetector.detect(inImage, markers, camParam, marker_size);
      //for each marker, draw info and its boundaries in the image
      for(unsigned int i=0; i<markers.size(); ++i)
      {
        // only publishing the selected marker
        if ( markers[i].id == marker_id1 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name1));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub1.publish(poseMsg);
        }
        else if ( markers[i].id == marker_id2 )
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
          br.sendTransform(tf::StampedTransform(transform, ros::Time::now(),
                                                parent_name, child_name2));
          geometry_msgs::Pose poseMsg;
          tf::poseTFToMsg(transform, poseMsg);
          pose_pub2.publish(poseMsg);
        }

        // but drawing all the detected markers
        markers[i].draw(inImage,Scalar(0,0,255),2);
      }

      //paint a circle in the center of the image
      cv::circle(inImage, cv::Point(inImage.cols/2, inImage.rows/2), 4, cv::Scalar(0,255,0), 1);

      if ( markers.size() == 2 )
      {
        float x[2], y[2], u[2], v[2];
        for (unsigned int i = 0; i < 2; ++i)
        {
          ROS_DEBUG_STREAM("Marker(" << i << ") at camera coordinates = ("
                           << markers[i].Tvec.at<float>(0,0) << ", "
                           << markers[i].Tvec.at<float>(1,0) << ", "
                           << markers[i].Tvec.at<float>(2,0));
          //normalized coordinates of the marker
          x[i] = markers[i].Tvec.at<float>(0,0)/markers[i].Tvec.at<float>(2,0);
          y[i] = markers[i].Tvec.at<float>(1,0)/markers[i].Tvec.at<float>(2,0);
          //undistorted pixel
          u[i] = x[i]*camParam.CameraMatrix.at<float>(0,0) +
              camParam.CameraMatrix.at<float>(0,2);
          v[i] = y[i]*camParam.CameraMatrix.at<float>(1,1) +
              camParam.CameraMatrix.at<float>(1,2);
        }

        ROS_DEBUG_STREAM("Mid point between the two markers in the image = ("
                         << (x[0]+x[1])/2 << ", " << (y[0]+y[1])/2 << ")");

        //              //paint a circle in the mid point of the normalized coordinates of both markers
        //              cv::circle(inImage,
        //                         cv::Point((u[0]+u[1])/2, (v[0]+v[1])/2),
        //                         3, cv::Scalar(0,0,255), CV_FILLED);


        //compute the midpoint in 3D:
        float midPoint3D[3]; //3D point
        for (unsigned int i = 0; i < 3; ++i )
          midPoint3D[i] = ( markers[0].Tvec.at<float>(i,0) +
                            markers[1].Tvec.at<float>(i,0) ) / 2;
        //now project the 3D mid point to normalized coordinates
        float midPointNormalized[2];
        midPointNormalized[0] = midPoint3D[0]/midPoint3D[2]; //x
        midPointNormalized[1] = midPoint3D[1]/midPoint3D[2]; //y
        u[0] = midPointNormalized[0]*camParam.CameraMatrix.at<float>(0,0) +
            camParam.CameraMatrix.at<float>(0,2);
        v[0] = midPointNormalized[1]*camParam.CameraMatrix.at<float>(1,1) +
            camParam.CameraMatrix.at<float>(1,2);

        ROS_DEBUG_STREAM("3D Mid point between the two markers in undistorted pixel coordinates = ("
                         << u[0] << ", " << v[0] << ")");

        //paint a circle in the mid point of the normalized coordinates of both markers
        cv::circle(inImage,
                   cv::Point(u[0], v[0]),
                   3, cv::Scalar(0,0,255), CV_FILLED);

      }

      //draw a 3d cube in each marker if there is 3d info
      if(camParam.isValid() && marker_size!=-1)
      {
        for(unsigned int i=0; i<markers.size(); ++i)
        {
          CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
        }
      }

      if(image_pub.getNumSubscribers() > 0)
      {
        //show input with augmented information
        cv_bridge::CvImage out_msg;
        out_msg.header.stamp = ros::Time::now();
        out_msg.encoding = sensor_msgs::image_encodings::RGB8;
        out_msg.image = inImage;
        image_pub.publish(out_msg.toImageMsg());
      }

      if(debug_pub.getNumSubscribers() > 0)
      {
        //show also the internal image resulting from the threshold operation
        cv_bridge::CvImage debug_msg;
        debug_msg.header.stamp = ros::Time::now();
        debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
        debug_msg.image = mDetector.getThresholdedImage();
        debug_pub.publish(debug_msg.toImageMsg());
      }

      ROS_DEBUG("runtime: %f ms",
                1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }
  }
}
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();

				//Ok, let's detect
				// use the smallest marker size over all configured boards so that
				// no board's markers are discarded by the detector
				double min_size = boards[0].marker_size;
				for (size_t board_index = 1; board_index < boards.size(); board_index++)
					if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
				mDetector.detect(inImage, markers, camParam, min_size, false);


				for (size_t board_index = 0; board_index < boards.size(); board_index++)
				{
					Board board_detected;

					//Detection of the board
					float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
					if (probDetect > 0.0)
					{
						tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);

						tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");

						//_tfBraodcaster.sendTransform(stampedTransform);		// from phillip

						/*geometry_msgs::PoseStamped poseMsg;
						tf::poseTFToMsg(transform, poseMsg.pose);
						poseMsg.header.frame_id = msg->header.frame_id;
						poseMsg.header.stamp = msg->header.stamp;
						pose_pub.publish(poseMsg);*/

						geometry_msgs::TransformStamped transformMsg;
						tf::transformStampedTFToMsg(stampedTransform, transformMsg);
						transform_pub.publish(transformMsg);

						/*geometry_msgs::Vector3Stamped positionMsg;
						positionMsg.header = transformMsg.header;
						positionMsg.vector = transformMsg.transform.translation;
						position_pub.publish(positionMsg);*/

						if(camParam.isValid())
						{
							//draw board axis
							CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
						}
					}
				}

				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}

				if(camParam.isValid())
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
Example #13
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    //        double ticksBefore = cv::getTickCount();
    static tf::TransformBroadcaster br;
    if(cam_info_received)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker
          if(markers[i].id == marker_id)
          {
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
            tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                  parent_name, child_name);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = parent_name;
            poseMsg.header.stamp = ros::Time::now();
            pose_pub.publish(poseMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,Scalar(0,0,255),2);
        }

        //draw a 3d cube in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            //CvDrawingUtils::draw3dCube(inImage, markers[i], camParam);
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = ros::Time::now();
          out_msg.encoding = sensor_msgs::image_encodings::RGB8; // matches the RGB8 conversion above
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = ros::Time::now();
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }

        //            ROS_INFO("runtime: %f ms",
        //                     1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			static tf::TransformBroadcaster br;

			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();
				//Ok, let's detect
				mDetector.detect(inImage, markers, camParam, marker_size, false);
				//Detection of the board
				float probDetect=the_board_detector.detect(markers, the_board_config, the_board_detected, camParam, marker_size);
				if (probDetect > 0.0)
				{
					foundBoard = true; // remember that a board was detected at least once
					tf::Transform transform = ar_sys::getTf(the_board_detected.Rvec, the_board_detected.Tvec);

					tf::StampedTransform stampedTransform(transform, msg->header.stamp, msg->header.frame_id, board_frame);

					if (publish_tf)
						br.sendTransform(stampedTransform);

					geometry_msgs::PoseStamped poseMsg;
					tf::poseTFToMsg(transform, poseMsg.pose);
					poseMsg.header.frame_id = msg->header.frame_id;
					poseMsg.header.stamp = msg->header.stamp;
					pose_pub.publish(poseMsg);

					geometry_msgs::TransformStamped transformMsg;
					tf::transformStampedTFToMsg(stampedTransform, transformMsg);
					transform_pub.publish(transformMsg);

					geometry_msgs::Vector3Stamped positionMsg;
					positionMsg.header = transformMsg.header;
					positionMsg.vector = transformMsg.transform.translation;
					position_pub.publish(positionMsg);
					
					std_msgs::Float32 boardSizeMsg;
					boardSizeMsg.data = the_board_detected[0].ssize;
					boardSize_pub.publish(boardSizeMsg);
				}

				// publish the raw board pose (Rvec/Tvec) as 32FC1 images
				if(rvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage rvecMsg;
					rvecMsg.header.frame_id = msg->header.frame_id;
					rvecMsg.header.stamp = msg->header.stamp;
					rvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					rvecMsg.image = the_board_detected[0].Rvec;
					rvec_pub.publish(rvecMsg.toImageMsg());
				}
	
				if(tvec_pub.getNumSubscribers() > 0 && foundBoard)
				{
					cv_bridge::CvImage tvecMsg;
					tvecMsg.header.frame_id = msg->header.frame_id;
					tvecMsg.header.stamp = msg->header.stamp;
					tvecMsg.encoding = sensor_msgs::image_encodings::TYPE_32FC1;
					tvecMsg.image = the_board_detected[0].Tvec;
					tvec_pub.publish(tvecMsg.toImageMsg());
				}
				
				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}


				if(camParam.isValid() && marker_size != -1)
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
					//draw board axis
					if (probDetect > 0.0)
					{
						CvDrawingUtils::draw3dAxis(resultImg, the_board_detected, camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
void imageGroundPlaneCallback(const ImageConstPtr &color, const CameraInfoConstPtr &camera_info,
                              const GroundPlaneConstPtr &gp)
{
    //    ROS_INFO("Entered gp-img callback");
    std::vector<cudaHOG::Detection> detHog;

    //  unsigned char image
    QImage image_rgb(&color->data[0], color->width, color->height, QImage::Format_RGB888);
    int returnPrepare = hog->prepare_image(image_rgb.convertToFormat(QImage::Format_ARGB32).bits(),
                                           (short unsigned int) color->width, (short unsigned int) color->height);

    if(returnPrepare)
    {
        ROS_ERROR("Error by preparing the image");
        return;
    }

    // Generate base camera
    Matrix<float> R = Eye<float>(3);
    Vector<float> t(3, 0.0);

    // Get GP
    Vector<double> GPN(3, (double*) &gp->n[0]);
    double GPd = ((double) gp->d)*(-1000.0); // GPd = -958.475;
    Matrix<double> K(3,3, (double*)&camera_info->K[0]);

    // NOTE: Using 0 1 0 does not work, apparently due to numerical problems in libCudaHOG (E(1,1) gets zero when solving quadratic form)
    Vector<float> float_GPN(3);
    float_GPN(0) = -0.0123896; //-float(GPN(0));
    float_GPN(1) = 0.999417; //-float(GPN(1)); // swapped with z by Timm
    float_GPN(2) = 0.0317988; //-float(GPN(2));

    float float_GPd = (float) GPd;
    Matrix<float> float_K(3,3);
    float_K(0,0) = K(0,0); float_K(1,0) = K(1,0); float_K(2,0) = K(2,0);
    float_K(1,1) = K(1,1); float_K(0,1) = K(0,1); float_K(2,1) = K(2,1);
    float_K(2,2) = K(2,2); float_K(0,2) = K(0,2); float_K(1,2) = K(1,2);

    //ROS_WARN("Ground plane: %.2f %.2f %.2f d=%.3f", float_GPN(0), float_GPN(1), float_GPN(2), float_GPd);

    // If an assertion fails inside libCudaHOG here, the ground plane is probably wrongly oriented
    hog->set_camera(R.data(), float_K.data(), t.data());
    hog->set_groundplane(float_GPN.data(), &float_GPd);
    hog->prepare_roi_by_groundplane();
    hog->test_image(detHog);
    hog->release_image();

    const int WINDOW_WIDTH = 64, WINDOW_HEIGHT = 128;

    GroundHOGDetections detections;

    detections.header = color->header;
    for(unsigned int i=0;i<detHog.size();i++)
    {
        float score = detHog[i].score;
        if (score < score_thresh) continue;
        float scale = detHog[i].scale;

        float width = (WINDOW_WIDTH - 32.0f)*scale;
        float height = (WINDOW_HEIGHT - 32.0f)*scale;
        float x = (detHog[i].x + 16.0f*scale);
        float y = (detHog[i].y + 16.0f*scale);

        detections.scale.push_back(scale);
        detections.score.push_back(score);
        detections.pos_x.push_back(x);
        detections.pos_y.push_back(y);
        detections.width.push_back(width);
        detections.height.push_back(height);
    }

    if(pub_result_image.getNumSubscribers()) {
        ROS_DEBUG("Publishing image");
        render_bbox_2D(detections, image_rgb, 255, 0, 0, 2);

        Image sensor_image;
        sensor_image.header = color->header;
        sensor_image.height = image_rgb.height();
        sensor_image.width  = image_rgb.width();
        sensor_image.step   = color->step;
        vector<unsigned char> image_bits(image_rgb.bits(), image_rgb.bits()+sensor_image.height*sensor_image.width*3);
        sensor_image.data = image_bits;
        sensor_image.encoding = color->encoding;

        pub_result_image.publish(sensor_image);
    }

    pub_message.publish(detections);


    //
    // Now create 3D coordinates for SPENCER DetectedPersons msg
    //
    if(pub_detected_persons.getNumSubscribers()) {
        spencer_tracking_msgs::DetectedPersons detected_persons;
        detected_persons.header = color->header;

        for(unsigned int i=0;i<detHog.size();i++)
        {
            float score = detHog[i].score;
            if (score < score_thresh) continue;
            float scale = detHog[i].scale;

            // FIXME: Is it correct to use these offsets for computing 3D position!?
            float width = (WINDOW_WIDTH - 32.0f)*scale;
            float height = (WINDOW_HEIGHT - 32.0f)*scale;
            float x = (detHog[i].x + 16.0f*scale);
            float y = (detHog[i].y + 16.0f*scale);

            Vector<double> normal(3, 0.0);
            normal(0) = GPN(0);
            normal(1) = GPN(1);
            normal(2) = GPN(2);

            Vector<double> pos3D;
            calc3DPosFromBBox(K, normal, GPd, x, y, width, height, worldScale, pos3D);       

            // DetectedPerson for SPENCER
            spencer_tracking_msgs::DetectedPerson detected_person;
            detected_person.modality = spencer_tracking_msgs::DetectedPerson::MODALITY_GENERIC_MONOCULAR_VISION;
            detected_person.confidence = detHog[i].score; // FIXME: normalize
            detected_person.pose.pose.position.x = -pos3D(0);
            detected_person.pose.pose.position.y = -pos3D(1);
            detected_person.pose.pose.position.z = -pos3D(2);  
            detected_person.pose.pose.orientation.w = 1.0;

            const double LARGE_VARIANCE = 999999999;
            detected_person.pose.covariance[0*6 + 0] = pose_variance;
            detected_person.pose.covariance[1*6 + 1] = pose_variance; // up axis (since this is in sensor frame!)
            detected_person.pose.covariance[2*6 + 2] = pose_variance;
            detected_person.pose.covariance[3*6 + 3] = LARGE_VARIANCE;
            detected_person.pose.covariance[4*6 + 4] = LARGE_VARIANCE;
            detected_person.pose.covariance[5*6 + 5] = LARGE_VARIANCE;

            detected_person.detection_id = current_detection_id;
            current_detection_id += detection_id_increment;

            detected_persons.detections.push_back(detected_person);  
        }

        // Publish
        pub_detected_persons.publish(detected_persons);
    }
}
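// calc3DPosFromBBox() is not shown in this snippet. A self-contained sketch of
// the usual geometry with plain doubles (the plane convention n.X = d and the
// role of worldScale are assumptions; the real helper uses the detector's own
// Matrix/Vector types):
#include <array>

std::array<double,3> groundPointFromBBox(double fx, double fy, double cx, double cy,
                                         const std::array<double,3>& n, double d,
                                         double x, double y, double w, double h,
                                         double worldScale)
{
    // viewing ray through the bottom-centre ("feet" point) of the bounding box
    const double u = x + w / 2.0, v = y + h;
    const std::array<double,3> ray = { (u - cx) / fx, (v - cy) / fy, 1.0 };

    // intersect lambda * ray with the ground plane n . X = d
    const double denom = n[0]*ray[0] + n[1]*ray[1] + n[2]*ray[2];
    const double lambda = d / denom;

    return { lambda * ray[0] * worldScale,
             lambda * ray[1] * worldScale,
             lambda * ray[2] * worldScale };
}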
    void image_callback(const sensor_msgs::ImageConstPtr& msg)
    {
        static tf::TransformBroadcaster br;
        if(cam_info_received)
        {
            cv_bridge::CvImagePtr cv_ptr;
            try
            {
                cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
                inImage = cv_ptr->image;

                //detection results will go into "markers"
                markers.clear();
                //Ok, let's detect
                mDetector.detect(inImage, markers, camParam, marker_size, false);
                //for each marker, draw info and its boundaries in the image
                for(size_t i=0; i<markers.size(); ++i)
                {
                    // only publishing the selected marker
                    if(markers[i].id == marker_id)
                    {
                        tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
                        tf::StampedTransform cameraToReference;
                        cameraToReference.setIdentity();

                        if ( reference_frame != camera_frame )
                        {
                            getTransform(reference_frame,
                                         camera_frame,
                                         cameraToReference);
                        }

                        transform =
                            static_cast<tf::Transform>(cameraToReference)
                            * static_cast<tf::Transform>(rightToLeft)
                            * transform;

                        tf::StampedTransform stampedTransform(transform, ros::Time::now(),
                                                              reference_frame, marker_frame);
                        br.sendTransform(stampedTransform);
                        geometry_msgs::PoseStamped poseMsg;
                        tf::poseTFToMsg(transform, poseMsg.pose);
                        poseMsg.header.frame_id = reference_frame;
                        poseMsg.header.stamp = ros::Time::now();
                        pose_pub.publish(poseMsg);

                        geometry_msgs::TransformStamped transformMsg;
                        tf::transformStampedTFToMsg(stampedTransform, transformMsg);
                        transform_pub.publish(transformMsg);

                        geometry_msgs::Vector3Stamped positionMsg;
                        positionMsg.header = transformMsg.header;
                        positionMsg.vector = transformMsg.transform.translation;
                        position_pub.publish(positionMsg);
                    }
                    // but drawing all the detected markers
                    markers[i].draw(inImage,cv::Scalar(0,0,255),2);
                }

                //draw a 3d cube in each marker if there is 3d info
                if(camParam.isValid() && marker_size!=-1)
                {
                    for(size_t i=0; i<markers.size(); ++i)
                    {
                        CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
                    }
                }

                if(image_pub.getNumSubscribers() > 0)
                {
                    //show input with augmented information
                    cv_bridge::CvImage out_msg;
                    out_msg.header.stamp = ros::Time::now();
                    out_msg.encoding = sensor_msgs::image_encodings::RGB8;
                    out_msg.image = inImage;
                    image_pub.publish(out_msg.toImageMsg());
                }

                if(debug_pub.getNumSubscribers() > 0)
                {
                    //show also the internal image resulting from the threshold operation
                    cv_bridge::CvImage debug_msg;
                    debug_msg.header.stamp = ros::Time::now();
                    debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
                    debug_msg.image = mDetector.getThresholdedImage();
                    debug_pub.publish(debug_msg.toImageMsg());
                }
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("cv_bridge exception: %s", e.what());
                return;
            }
        }
    }
    void trackNewCloud(const sensor_msgs::PointCloud2Ptr& msg)
    {
        ros::Time start_time_stamp = msg->header.stamp;

        boost::posix_time::ptime start_time = boost::posix_time::microsec_clock::local_time ();
        //std::cout << (start_time - last_cloud_).total_nanoseconds () * 1.0e-9 << std::endl;

        float time_ms = (start_time_stamp - last_cloud_ros_time_).toSec() * 1e3;

        last_cloud_ = start_time;
        last_cloud_ros_time_ = start_time_stamp;

        pcl::ScopeTime t("trackNewCloud");
        scene_.reset(new pcl::PointCloud<PointT>);
        pcl::moveFromROSMsg (*msg, *scene_);

        v4r::DataMatrix2D<Eigen::Vector3f> kp_cloud;
        cv::Mat_<cv::Vec3b> image;

        v4r::convertCloud(*scene_, kp_cloud, image);

        int cam_idx=-1;

        bool is_ok = camtracker->track(image, kp_cloud, pose_, conf_, cam_idx);

        if(debug_image_publisher_.getNumSubscribers())
        {
            drawConfidenceBar(image, conf_);
            sensor_msgs::ImagePtr image_msg = cv_bridge::CvImage(msg->header, "bgr8", image).toImageMsg();
            debug_image_publisher_.publish(image_msg);
        }

        std::cout << time_ms << " conf:" << conf_ << std::endl;

        if(is_ok)
        {
            selectFrames(*scene_, cam_idx, pose_);
            tf::Transform transform;

            //v4r::invPose(pose, inv_pose);
            transform.setOrigin(tf::Vector3(pose_(0,3), pose_(1,3), pose_(2,3)));
            tf::Matrix3x3 R(pose_(0,0), pose_(0,1), pose_(0,2),
                            pose_(1,0), pose_(1,1), pose_(1,2),
                            pose_(2,0), pose_(2,1), pose_(2,2));
            tf::Quaternion q;
            R.getRotation(q);
            transform.setRotation(q);
            ros::Time now_sync = ros::Time::now();
            cameraTransformBroadcaster.sendTransform(tf::StampedTransform(transform, now_sync, "camera_rgb_optical_frame", "world"));

            // trajectory thinning: append a point only if the camera has moved
            // more than trajectory_threshold_ since the last stored point
            bool publish_trajectory = true;
            if (!trajectory_marker_.points.empty())
            {
                const geometry_msgs::Point& last = trajectory_marker_.points.back();
                Eigen::Vector3f v_last(last.x, last.y, last.z);
                Eigen::Vector3f v_curr(-pose_.col(3).head<3>());
                if ((v_last - v_curr).norm() < trajectory_threshold_)
                    publish_trajectory = false;
            }

            if (publish_trajectory)
            {
                geometry_msgs::Point p;
                p.x = -pose_(0,3);
                p.y = -pose_(1,3);
                p.z = -pose_(2,3);
                std_msgs::ColorRGBA c;
                c.a = 1.0;
                c.g = conf_;
                trajectory_marker_.points.push_back(p);
                trajectory_marker_.colors.push_back(c);
                trajectory_marker_.header.stamp = msg->header.stamp;
                trajectory_publisher_.publish(trajectory_marker_);
            }
        }
        else
            std::cout << "cam tracker not ready!" << std::endl;

        /*std_msgs::Float32 conf_mesage;
      conf_mesage.data = conf;
      confidence_publisher_.publish(conf_mesage);*/
    }
void imageCb(const sensor_msgs::ImageConstPtr& msg)
{
    if(image_pub.getNumSubscribers() > 0 ||
       mask_pub.getNumSubscribers() > 0 ||
       debug_pub.getNumSubscribers() > 0)
    {
        cv_bridge::CvImagePtr cv_ptr;
        try
        {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }

        image_rect = cv_ptr->image;

        //double ticksBefore = cv::getTickCount();

        cv::Mat backProject;
        cv::Mat hsv;
        cv::cvtColor(image_rect, hsv, CV_BGR2HSV);
        // Quantize the hue to 30 levels
        // and the saturation to 32 levels
        // hue varies from 0 to 179, see cvtColor
        float hranges[] = { 0, 180 };
        // saturation varies from 0 (black-gray-white) to
        // 255 (pure spectrum color)
        float sranges[] = { 0, 256 };
        const float* ranges[] = { hranges, sranges };
        // we compute the histogram from the 0-th and 1-st channels
        int channels[] = {0, 1};

        cv::calcBackProject(&hsv, 1, channels, target_hist, backProject, ranges, 1, true);
        cv::normalize(backProject, backProject, 0, 255, cv::NORM_MINMAX, -1, cv::Mat());
        cv::threshold(backProject, backProject, threshold, 255, CV_THRESH_BINARY);

        cv::Mat mask, tmp1;

        // morphological clean-up: dilate to close holes, then erode to remove
        // speckle; whichever stage runs last writes directly into 'mask'
        if(dilate_iterations == 0 && erode_iterations == 0)
            mask = backProject;

        if(dilate_iterations > 0)
        {
            cv::dilate(backProject, erode_iterations == 0 ? mask: tmp1,
                       cv::Mat::ones(dilate_size, dilate_size, CV_8UC1),
                       cv::Point(-1, -1), dilate_iterations);
        }

        if(erode_iterations > 0)
        {
            cv::erode(dilate_iterations == 0 ? backProject : tmp1, mask,
                      cv::Mat::ones(erode_size, erode_size, CV_8UC1),
                      cv::Point(-1, -1), erode_iterations);
        }

        ros::Time now = msg->header.stamp; //ros::Time::now();

        if(mask_pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage mask_msg;
            mask_msg.header = msg->header;
            mask_msg.header.stamp = now; //ros::Time::now();
            mask_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
            mask_msg.image = mask;
            mask_pub.publish(mask_msg.toImageMsg());
        }

        if(image_pub.getNumSubscribers() > 0)
        {
            cv::Mat masked;
            image_rect.copyTo(masked, mask);
            cv_bridge::CvImage masked_msg;
            masked_msg.header = msg->header;
            masked_msg.encoding = sensor_msgs::image_encodings::BGR8;
            masked_msg.image = masked;
            masked_msg.header.stamp  = now; //ros::Time::now();
            image_pub.publish(*masked_msg.toImageMsg());
        }

        //DEBUG
        if(debug_pub.getNumSubscribers() > 0)
        {
            cv_bridge::CvImage debug_msg;
            debug_msg.header = msg->header;
            debug_msg.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
            debug_msg.image = backProject;
            debug_pub.publish(*debug_msg.toImageMsg());
        }

        //    ROS_INFO("imageCb runtime: %f ms",
        //             1000*(cv::getTickCount() - ticksBefore)/cv::getTickFrequency());
    }
}
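// target_hist is not built in this snippet. A minimal sketch of how such a
// hue-saturation histogram is typically computed from a template image of the
// tracked object (file path and bin counts here are assumptions):
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

cv::MatND buildTargetHist(const std::string& template_path)
{
    cv::Mat bgr = cv::imread(template_path);
    cv::Mat hsv;
    cv::cvtColor(bgr, hsv, CV_BGR2HSV);

    int histSize[] = { 30, 32 };               // 30 hue bins, 32 saturation bins
    float hranges[] = { 0, 180 };
    float sranges[] = { 0, 256 };
    const float* ranges[] = { hranges, sranges };
    int channels[] = { 0, 1 };                 // H and S channels

    cv::MatND hist;
    cv::calcHist(&hsv, 1, channels, cv::Mat(), // no mask
                 hist, 2, histSize, ranges,
                 true,                         // uniform histogram
                 false);                       // do not accumulate
    cv::normalize(hist, hist, 0, 255, cv::NORM_MINMAX);
    return hist;
}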
Example #19
  /** Main driver loop */
  bool spin()
  {
    while (nh_.ok())
      {
        getParameters();                // check reconfigurable parameters

        // get current CameraInfo data
        cam_info_ = cinfo_->getCameraInfo();
        cloud2_.header.frame_id = cloud_.header.frame_id =
          image_d_.header.frame_id = image_i_.header.frame_id =
          image_c_.header.frame_id = image_d16_.header.frame_id =
          cam_info_.header.frame_id = camera_name_; //config_.frame_id;

        if(!device_open_)
          {
            try
              {
                if (dev_->open(config_.auto_exposure, config_.integration_time,
                               modulation_freq_, config_.amp_threshold, ether_addr_) == 0)
                  {
                    ROS_INFO_STREAM("[" << camera_name_ << "] Connected to device with ID: "
                                    << dev_->device_id_);
                    ROS_INFO_STREAM("[" << camera_name_ << "] libmesasr version: " << dev_->lib_version_);
                    device_open_ = true; 
                  }
                else
                  {
                    ros::Duration(3.0).sleep();
                  }
              }
            catch (sr::Exception& e)
              {
                ROS_ERROR_STREAM("Exception thrown while connecting to the camera: "
                                 << e.what ());
                ros::Duration(3.0).sleep();
              }
          }
        else
          {
            try
              {
                // Read data from the Camera
                dev_->readData(cloud_,cloud2_,image_d_, image_i_, image_c_, image_d16_);
 
                cam_info_.header.stamp = image_d_.header.stamp;
                cam_info_.height = image_d_.height;
                cam_info_.width = image_d_.width;

                // Publish it via image_transport
                if (info_pub_.getNumSubscribers() > 0)
                  info_pub_.publish(cam_info_);
                if (image_pub_d_.getNumSubscribers() > 0)
                  image_pub_d_.publish(image_d_);
                if (image_pub_i_.getNumSubscribers() > 0)
                  image_pub_i_.publish(image_i_);
                if (image_pub_c_.getNumSubscribers() > 0)
                  image_pub_c_.publish(image_c_);
                if (image_pub_d16_.getNumSubscribers() > 0)
                  image_pub_d16_.publish(image_d16_);
                if (cloud_pub_.getNumSubscribers() > 0)
                  cloud_pub_.publish (cloud_);
                if (cloud_pub2_.getNumSubscribers() > 0)
                  cloud_pub2_.publish (cloud2_);
              }
            catch (sr::Exception& e) {
              ROS_WARN("Exception thrown trying to read data: %s",
                       e.what());
              dev_->close();
              device_open_ = false;
              ros::Duration(3.0).sleep();
              }
          }
        ros::spinOnce();
      }

    return true;
  }
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;
    if(cam_info_received_)
    {
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage_ = cv_ptr->image;

        //clear out previous detection results
        markers_.clear();
        marker_msg_->markers.clear();

        //Ok, let's detect
        mDetector_.detect(inImage_, markers_, camParam_, marker_size_, false);
        marker_msg_->markers.resize(markers_.size());

        ros::Time curr_stamp(ros::Time::now());
        marker_msg_->header.stamp = curr_stamp;
        marker_msg_->header.seq++;

        //get the current transform from the camera frame to output ref frame
        tf::StampedTransform cameraToReference;
        cameraToReference.setIdentity();

        if ( reference_frame_ != camera_frame_ )
        {
          getTransform(reference_frame_,
              camera_frame_,
              cameraToReference);
        }

        //Now find the transform for each detected marker
        for(size_t i=0; i < markers_.size(); ++i)
        {
          tf::Transform transform = aruco_ros::arucoMarker2Tf(markers_[i]);
          transform = static_cast<tf::Transform>(cameraToReference) * transform;

          //Maybe change this to broadcast separate transforms for each marker?
          //tf::StampedTransform stampedTransform(transform, curr_stamp,
              //reference_frame_, marker_frame_);
          //br.sendTransform(stampedTransform);

          aruco_msgs::Marker & marker_i = marker_msg_->markers.at(i);
          tf::poseTFToMsg(transform, marker_i.pose.pose);
          marker_i.header.frame_id = reference_frame_;
          marker_i.header.stamp = curr_stamp;
          marker_i.id = markers_.at(i).id;
          marker_i.confidence = 1.0;

          markers_[i].draw(inImage_,cv::Scalar(0,0,255),2);
        }

        //publish marker array
        if (marker_msg_->markers.size() > 0)
          marker_pub_.publish(marker_msg_);

        //draw a 3d cube in each marker if there is 3d info
        if(camParam_.isValid() && marker_size_!=-1)
        {
          for(size_t i=0; i<markers_.size(); ++i)
            aruco::CvDrawingUtils::draw3dAxis(inImage_, markers_[i], camParam_);
        }

        if(image_pub_.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage_;
          image_pub_.publish(out_msg.toImageMsg());
        }

        if(debug_pub_.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector_.getThresholdedImage();
          debug_pub_.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
      }
    }
  }