Example #1
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::RotatedRectArrayStamped rects_msg, ellipses_msg;
      rects_msg.header = msg->header;
      ellipses_msg.header = msg->header;

      // Do the work
      cv::Mat src_gray;

      /// Convert image to gray and blur it
      if ( frame.channels() > 1 ) {
        cv::cvtColor( frame, src_gray, cv::COLOR_RGB2GRAY );
      } else {
        src_gray = frame;
      }
      cv::blur( src_gray, src_gray, cv::Size(3,3) );

      /// Create window
      if( debug_view_) {
        cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
      }

      cv::Mat threshold_output;
      int max_thresh = 255;
      std::vector<std::vector<cv::Point> > contours;
      std::vector<cv::Vec4i> hierarchy;
      cv::RNG rng(12345);

      /// Detect edges using Threshold
      cv::threshold( src_gray, threshold_output, threshold_, 255, cv::THRESH_BINARY );

      /// Find contours
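      /// (CV_RETR_TREE retrieves the full nested contour hierarchy;
      ///  CV_CHAIN_APPROX_SIMPLE compresses straight segments to their end points)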
      cv::findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

      /// Find the rotated rectangles and ellipses for each contour
      std::vector<cv::RotatedRect> minRect( contours.size() );
      std::vector<cv::RotatedRect> minEllipse( contours.size() );

      for( size_t i = 0; i < contours.size(); i++ )
      { minRect[i] = cv::minAreaRect( cv::Mat(contours[i]) );
        if( contours[i].size() > 5 )
        { minEllipse[i] = cv::fitEllipse( cv::Mat(contours[i]) ); }
      }

      /// Draw contours + rotated rects + ellipses
      cv::Mat drawing = cv::Mat::zeros( threshold_output.size(), CV_8UC3 );
      for( size_t i = 0; i< contours.size(); i++ )
      {
        cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        // contour
        cv::drawContours( drawing, contours, (int)i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
        // ellipse
        cv::ellipse( drawing, minEllipse[i], color, 2, 8 );
        // rotated rectangle
        cv::Point2f rect_points[4]; minRect[i].points( rect_points );
        for( int j = 0; j < 4; j++ )
          cv::line( drawing, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );

        opencv_apps::RotatedRect rect_msg;
        opencv_apps::Point2D rect_center;
        opencv_apps::Size rect_size;
        rect_center.x = minRect[i].center.x;
        rect_center.y = minRect[i].center.y;
        rect_size.width = minRect[i].size.width;
        rect_size.height = minRect[i].size.height;
        rect_msg.center = rect_center;
        rect_msg.size = rect_size;
        rect_msg.angle = minRect[i].angle;
        opencv_apps::RotatedRect ellipse_msg;
        opencv_apps::Point2D ellipse_center;
        opencv_apps::Size ellipse_size;
        ellipse_center.x = minEllipse[i].center.x;
        ellipse_center.y = minEllipse[i].center.y;
        ellipse_size.width = minEllipse[i].size.width;
        ellipse_size.height = minEllipse[i].size.height;
        ellipse_msg.center = ellipse_center;
        ellipse_msg.size = ellipse_size;
        ellipse_msg.angle = minEllipse[i].angle;

        rects_msg.rects.push_back(rect_msg);
        ellipses_msg.rects.push_back(ellipse_msg);
      }

      /// Create a Trackbar for user to enter threshold
      if( debug_view_) {
        if (need_config_update_) {
          config_.threshold = threshold_;
          srv.updateConfig(config_);
          need_config_update_ = false;
        }
        cv::createTrackbar( "Threshold:", window_name_, &threshold_, max_thresh, trackbarCallback);

        cv::imshow( window_name_, drawing );
        int c = cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, sensor_msgs::image_encodings::BGR8, drawing).toImageMsg();
      img_pub_.publish(out_img);
      rects_pub_.publish(rects_msg);
      ellipses_pub_.publish(ellipses_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
Example #2
//This function is called every time a new image is published
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
    //Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        //Always copy, returning a mutable CvImage
        //OpenCV expects color images to use BGR channel order.
        cv_ptr = cv_bridge::toCvCopy(original_image, enc::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        //if there is an error during conversion, display it
        ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
        return;
    }
 
 /*   //Invert Image
    //Go through all the rows
    for(int i=0; i<cv_ptr->image.rows; i++)
    {
        //Go through all the columns
        for(int j=0; j<cv_ptr->image.cols; j++)
        {
            //Go through all the channels (b, g, r)
            for(int k=0; k<cv_ptr->image.channels(); k++)
            {
                //Invert the image by subtracting image data from 255               
                cv_ptr->image.data[i*cv_ptr->image.step + j*3 + k] = 255 - cv_ptr->image.data[i*cv_ptr->image.step + j*3 + k];
            }
        }
    }
   */ 
     cv::Mat img_mask_blue, img_hsv_blue, combined_Image;
     cv::cvtColor(cv_ptr->image, img_hsv_blue, CV_BGR2HSV);
     cv::inRange(img_hsv_blue, cv::Scalar(LowerH,LowerS,LowerV), cv::Scalar(UpperH,UpperS,UpperV), img_mask_blue);

     cv::Mat img_mask_red, img_hsv_red;
     cv::cvtColor(cv_ptr->image, img_hsv_red, CV_BGR2HSV);
     //Red wraps around the hue axis, so it needs two hue ranges (near 0 and
     //near 180) combined into one mask; a single range cannot straddle the
     //wrap. (Typical bounds below; tune the S/V limits to the scene.)
     cv::Mat img_mask_red_low, img_mask_red_high;
     cv::inRange(img_hsv_red, cv::Scalar(0, 15, 100), cv::Scalar(10, 255, 255), img_mask_red_low);
     cv::inRange(img_hsv_red, cv::Scalar(170, 15, 100), cv::Scalar(180, 255, 255), img_mask_red_high);
     cv::bitwise_or(img_mask_red_low, img_mask_red_high, img_mask_red);

    cv::addWeighted ( img_mask_red, 1, img_mask_blue, 1, 0.0, combined_Image);
     
    // cv::Mat img_mask_green, img_hsv_green;
    // cv::cvtColor(cv_ptr->image, img_hsv_green, CV_BGR2HSV);
    // cv::inRange(img_hsv_green, cv::Scalar(LowerH,LowerS,LowerV), cv::Scalar(UpperH,UpperS,UpperV), img_mask_green);
   
     
     
     //Display the image using OpenCV
     cv::imshow(WINDOW, combined_Image);
     //cv::imshow(WINDOW, img_mask_red);
    // cv::imshow(WINDOW, img_mask_green);

     //Add some delay in milliseconds. The function only works if there is at least one HighGUI window created and the window is active. If there are several HighGUI windows, any of them can be active.
     cv::waitKey(3);
     //Calculate the moments of the thresholded image
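     //(a minimal sketch of the moments computation announced above, using the
     // combined mask; the variable names are illustrative)
     cv::Moments m = cv::moments(combined_Image, true); // binaryImage = true
     if (m.m00 > 0)
     {
         double centroid_x = m.m10 / m.m00; // mean column of the white pixels
         double centroid_y = m.m01 / m.m00; // mean row of the white pixels
         ROS_INFO("mask centroid: (%f, %f)", centroid_x, centroid_y);
     }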
   

 
         //Convert the CvImage to a ROS image message and publish it on the "camera/image_processed" topic.
    pub.publish(cv_ptr->toImageMsg());
 
}
Example #3
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FlowArrayStamped flows_msg;
      flows_msg.header = msg->header;

      if( debug_view_) {
        cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
        if (need_config_update_) {
          srv.updateConfig(config_);
          need_config_update_ = false;
        }
      }

      // Do the work
      if ( frame.channels() > 1 ) {
        cv::cvtColor( frame, gray, cv::COLOR_BGR2GRAY );
      } else {
        frame.copyTo(gray);
      }
      if( prevgray.data )
      {
        cv::calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
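        // Farneback parameters above: pyr_scale=0.5, levels=3, winsize=15,
        // iterations=3, poly_n=5, poly_sigma=1.2, flags=0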
        cv::cvtColor(prevgray, cflow, cv::COLOR_GRAY2BGR);
        //drawOptFlowMap(flow, cflow, 16, 1.5, Scalar(0, 255, 0));
        int step = 16;
        cv::Scalar color = cv::Scalar(0, 255, 0);
        for(int y = 0; y < cflow.rows; y += step)
          for(int x = 0; x < cflow.cols; x += step)
        {
          const cv::Point2f& fxy = flow.at<cv::Point2f>(y, x);
          cv::line(cflow, cv::Point(x,y), cv::Point(cvRound(x+fxy.x), cvRound(y+fxy.y)),
                   color);
          cv::circle(cflow, cv::Point(x,y), 2, color, -1);

          opencv_apps::Flow flow_msg;
          opencv_apps::Point2D point_msg;
          opencv_apps::Point2D velocity_msg;
          point_msg.x = x;
          point_msg.y = y;
          velocity_msg.x = fxy.x;
          velocity_msg.y = fxy.y;
          flow_msg.point = point_msg;
          flow_msg.velocity = velocity_msg;
          flows_msg.flow.push_back(flow_msg);
        }
      }

      std::swap(prevgray, gray);

      //-- Show what you got
      if( debug_view_) {
        cv::imshow( window_name_, cflow );
        int c = cv::waitKey(1);
      }


      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, "bgr8", cflow).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(flows_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
Example #4
void imageCallback(const sensor_msgs::ImageConstPtr & msg){

#ifdef PRINT_ROS_INFO
  ROS_INFO("Got image message.");
#endif

  // Convert the incoming image message to OpenCV format.
  cv::Mat img;
  try{
    img = cv_bridge::toCvShare(msg, "bgr8")->image;
  }
  catch(cv_bridge::Exception & e){
    ROS_ERROR("Could not convert from '%s' to 'bgr8'.", msg->encoding.c_str());
    return; // without a valid image there is nothing to process
  }

#ifdef PRINT_ROS_INFO
  ROS_INFO("Converting image done.");
#endif  

  //std::cout << "image size = ( " << img.rows << " X " << img.cols << " )." << std::endl;
  //printf("image data address 0x%x\n", img.data);

  if( startTracking ){

    //trackerMutex.lock();

#ifdef PRINT_ROS_INFO

    ROS_INFO("Tracker: Reading Frame ... ");
#endif    

    // update the tracking status, and draw the result.
    tracker->readFrame(img);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Updating status ... ");
#endif    

    tracker->updateTrackerStatus();

#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: status updated ... ");
    ROS_INFO("Tracker: drawing ... ");
#endif    

    cv::Mat temp;
    img.copyTo(temp);
    tracker->drawTrackers(temp);
    
#ifdef PRINT_ROS_INFO
    ROS_INFO("Tracker: Publishing ... ");
#endif    

    // republish this image.
    sensor_msgs::ImagePtr out_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", temp).toImageMsg();
    pub.publish(out_msg);

    // publish to topic -- object_location
    cv::Point2i location = tracker->getWeightedAverageLocation();
    std::stringstream locationStrStream;

    int currentNum = tracker->getSampleNum();
    int numWithHighConfidence = tracker->getNumOfSamplesHasProbLargerThan(PROB_THRESHOD);

    
    float highConfidenceSampleRatio;
    if( currentNum <= 0){
      highConfidenceSampleRatio = 0;
    }else{
      highConfidenceSampleRatio = numWithHighConfidence * 1.0f / currentNum;
    }

    std::cout << "High confidence sample ratio = " << highConfidenceSampleRatio << std::endl; 
    
    if( location.x < 0 || location.y < 0 || highConfidenceSampleRatio <= HIGH_CONFID_NUM_RATIO_THRESHOLD ){
      //locationStrStream << "object_x " << "0" << " , " << "object_y " << "0" << " , ";
      
      locationStrStream << "object_x " << img.cols /2   << ", " << "object_y " << img.rows / 2 << ", ";
      
      // make offsets to the samples:
      
      ROS_INFO("Tracker offset!");
      if( lastMovementDirection == TRACKER_UP)
	tracker->offsetTracker(TRACKER_DOWN);
      else if( lastMovementDirection == TRACKER_DOWN)
	tracker->offsetTracker(TRACKER_UP);
      else if( lastMovementDirection == TRACKER_LEFT)
	tracker->offsetTracker(TRACKER_RIGHT);
      else if( lastMovementDirection == TRACKER_RIGHT)
	tracker->offsetTracker(TRACKER_LEFT);
      
      
    }else{
      // "x 10, y 10, width 360, height 640"
      locationStrStream << "object_x " << location.x << ", " << "object_y " << location.y << ", ";
      lastMovementDirection = -1;
    }

    locationStrStream << "width " << img.cols << ", " << "height " << img.rows << ", ";

    locationStrStream << "direction follow" ;

    std_msgs::String locationMsg;
    locationMsg.data = locationStrStream.str();
    location_pub.publish(locationMsg);
        
    // release the lock
    //trackerMutex.unlock();

  }

  else if (! objectSpecified ) {
    // show the image and let the user specify the initial object region.

    //std::cout << img.rows << "," << img.cols << std::endl;
    
    cv::Mat tempImage(img.rows, img.cols, img.type());
    img.copyTo(tempImage);

    
    if( trackerMaxSize < 0){
      trackerMaxSize = MIN(img.rows, img.cols) - 1;
    }
    
    ROS_INFO("Drawing tracker ... ");
    
    //cv::rectangle(tempImage, cv::Rect(tempImage.cols / 2 - trackerSize / 2, tempImage.rows / 2 - trackerSize / 2, trackerSize, trackerSize), cv::Scalar(0,0,255));
    cv::rectangle(tempImage, cv::Rect(leftTopX, leftTopY, trackerSize, trackerSize), cv::Scalar(0,0,255));
    
    // republish this image.
    sensor_msgs::ImagePtr out_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", tempImage).toImageMsg();
    pub.publish(out_msg);

  }
  
  else{

    //trackerMutex.lock();
    
    // haven't started tracking, but the initial object is specified.
    // create a tracker;
    if( tracker != NULL) delete tracker;

    tracker = new Tracker2D();

    tracker->setLimit(5 , img.cols - 5, 5 , img.rows - 5);
    
    //tracker->initialize(cv::Point2f(img.cols / 2, img.rows / 2), trackerSize);
    tracker->initialize(cv::Point2f(leftTopX + trackerSize / 2, leftTopY + trackerSize / 2), trackerSize);
    
    // set object feature.
    //cv::Mat objImage = img(cv::Rect(img.cols / 2 - trackerSize /2, img.rows / 2 - trackerSize /2, trackerSize, trackerSize));
    cv::Mat objImage = img(cv::Rect(leftTopX, leftTopY, trackerSize, trackerSize));
    
    tracker->setObjectFeature(objImage);
    
    ROS_INFO("Starting Tracking ... " );
    
    startTracking = true;

    //trackerMutex.unlock();
  }
}
Example #5
int main(int argc, char **argv)
{
    if (argc < 2)
    {
        std::cerr << "Usage: " << argv[0] << " <data_directory_name>" << std::endl;
        return -1;
    }
    dirName = argv[1];
    toFeatures = pathToData + "/FeatureData/" + dirName;
    toPhotos = pathToData + "/RenderedImages/" + dirName;
    srand(time(0));
    ros::init(argc ,argv, "ROS_Publisher");
    NodeHandle node;
    image_transport::ImageTransport it(node);

    std::cout << "Starting image load" << endl;
    loadImages();
    cout << "Done Loading Images" << endl;
    getchar();

    mcl_data_subscriber = node.subscribe(mcl_data_publisher_name, 4, MyDataCallback);

    time_t temptime = time(0);
    std::cout << "Waiting for Handshake from Program .." << std::endl;
    while(!handshake_recieved && (time(0) - temptime) < 20)
    {
        ros::spinOnce();
        ros::Duration(0.01).sleep(); // avoid busy-waiting at full CPU
    }
    if(handshake_recieved)
        std::cout << "Handshake recieved" << std::endl;
    else
    {
        std::cout << "No handshake recieved";
        return -1;
    }
    
    movement_publisher = node.advertise<std_msgs::String>("ROBOT_MOVEMENT_PUBLISHER", 4);

    data_publisher = it.advertise(publish_image_data_under, 4, true);
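    // (the third argument latches the topic so late subscribers still receive the last image)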


    char key = 'k';
    namedWindow("Robot Image");
    namedWindow("Top Match");

    while(ros::ok() && key != 'q')
    {
        ros::spinOnce();
        int i = rand()%image_names.size();
        cv_bridge::CvImage out_msg;
        ros::Time scan_time = ros::Time::now();
        out_msg.header.stamp = scan_time;
        out_msg.header.frame_id = "robot_image";
        out_msg.encoding = sensor_msgs::image_encodings::BGR8;
        out_msg.image = image_list[current_image];
        
        if(!out_msg.image.empty())
            data_publisher.publish(out_msg.toImageMsg());
        ros::spinOnce();

        // std::cout << "hererer" << std::endl;
        imshow("Robot Image", image_list[current_image]);
        imshow("Top Match", BestGuessImage);

        key = cv::waitKey(2);
        // if(key == ' ')
        //     current_image++;
        // if(current_image == image_list.size())
        //     current_image = 0;
        //ros::Duration(0.1).sleep();
    }

    std_msgs::String msg;
    stringstream ss;
    ss << killflag << "_";
    msg.data = ss.str();
    movement_publisher.publish(msg);

    ros::spinOnce();
    ros::spinOnce();

    ros::Duration(0.1).sleep();
    ros::spinOnce();
    ros::shutdown();

}
Example #6
	void imageCb(const sensor_msgs::ImageConstPtr& msg)
	{
		RNG rng(12345);
		cv_bridge::CvImagePtr cv_ptr;
		
		try{ 
			cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
		 }
		catch(cv_bridge::Exception& e){	ROS_ERROR("cv_bridge exception: %s", e.what()); return;	}
		
		
		frame = cv_ptr->image;
		cvtColor(frame, frame_hsv, CV_BGR2HSV);
		center_mat = Rect(Point(frame.cols/2 - 10, frame.rows/2 - 10), Point(frame.cols/2 + 10, frame.rows/2 + 10));
		
		if(set_A_next_frame){

			Mat croppedHSV = frame_hsv(center_mat);
			Mat croppedFrame   = frame(center_mat);
			Scalar meanScalar, stdScalar;
			meanStdDev(croppedHSV, meanScalar, stdScalar);
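			// meanScalar now holds the per-channel mean (H, S, V) of the centre patch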
		
			if(onetwoA == 0){
				detect_color_A2 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
				meanStdDev(croppedFrame, meanScalar, stdScalar);
				color_A2_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);		
			}
			if(onetwoA == 1){
				detect_color_A1 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
				meanStdDev(croppedFrame, meanScalar, stdScalar);
				color_A1_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
			}
			
			onetwoA = 1 - onetwoA;
			recalculate_detection(0);	

			set_A_next_frame = false;
			waitKey(3);
			image_pub_.publish(cv_ptr->toImageMsg());
		}
		
		if(set_B_next_frame){

			Mat croppedHSV = frame_hsv(center_mat);
			Mat croppedFrame   = frame(center_mat);
			Scalar meanScalar, stdScalar;
			meanStdDev(croppedHSV, meanScalar, stdScalar);
		
			if(onetwoB == 0){
				detect_color_B2 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
				meanStdDev(croppedFrame, meanScalar, stdScalar);
				color_B2_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);		
			}
			if(onetwoB == 1){
				detect_color_B1 = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
				meanStdDev(croppedFrame, meanScalar, stdScalar);
				color_B1_RGB = Scalar( meanScalar.val[0], meanScalar.val[1], meanScalar.val[2]);
			}
			
			onetwoB = 1 - onetwoB;
			recalculate_detection(1);	

			set_B_next_frame = false;
			waitKey(3);
			image_pub_.publish(cv_ptr->toImageMsg());
		}
		
		
		
		if(!set_A_next_frame && !set_B_next_frame){
			Scalar color = Scalar( 0,0,0 );
				
			rectangle(frame,center_mat, color, 2,8,0);
			
			rectangle(frame, Point(5,0), Point(12,7), color_A1_RGB , 4,8,0);
			rectangle(frame, Point(17,0), Point(24,7), color_A2_RGB, 4,8,0);
			
			rectangle(frame, Point(40,0), Point(47,7), color_B1_RGB, 4,8,0);
			rectangle(frame, Point(52,0), Point(59,7), color_B2_RGB, 4,8,0);
			
			inRange(frame_hsv.clone(), detect_color_A_min, detect_color_A_max, thresh_A);
			inRange(frame_hsv.clone(), detect_color_B_min, detect_color_B_max, thresh_B);
			
			findContours(thresh_A, contours_A, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
			findContours(thresh_B, contours_B, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);				
			
			Moments moment;
			int x_mass, y_mass, x_center = frame.cols / 2, y_center = frame.rows / 2;
			CvPoint mass_center = cvPoint(0,0), img_center = cvPoint(x_center,y_center);
			double max_area = 0, current_area;
			int max_area_index = -1;
			
			for( size_t i = 0; i< contours_A.size(); i++ )
			{
				current_area = contourArea(contours_A[i]);
				if(max_area < current_area )  
				{
					max_area = current_area;
					max_area_index = i;
				}

			}
							
			if (max_area_index > -1){
				color = Scalar( 240, 100, 100 );
				drawContours( frame, contours_A, max_area_index, color, 2, 8, vector<Vec4i>(), 0, Point() );
				moment = moments( contours_A[max_area_index], false);
				lpad_A.x = mass_center.x = x_mass = moment.m10 / moment.m00;
				lpad_A.y = mass_center.y = y_mass = moment.m01 / moment.m00;
				lpad_A.z = 0;
				line(frame,mass_center, img_center, color, 4, 8 , 0);
				

				frameSize.data.clear();
				frameSize.data.push_back(frame.size().width);
				frameSize.data.push_back(frame.size().height);

				pubFrameSize.publish(frameSize);
				pubPosition.publish(lpad_A);				
			}
			
			
			
			max_area = 0;
			max_area_index = -1;
			for( size_t i = 0; i< contours_B.size(); i++ )
			{
				current_area = contourArea(contours_B[i]);
				if(max_area < current_area )  
				{
					max_area = current_area;
					max_area_index = i;
				}
			}
								
			if (max_area_index > -1){
				color = Scalar( 0, 255, 255 );
				drawContours( frame, contours_B, max_area_index, color, 2, 8, vector<Vec4i>(), 0, Point() );
				moment = moments( contours_B[max_area_index], false);
				lpad_B.x = mass_center.x = x_mass = moment.m10 / moment.m00;
				lpad_B.y = mass_center.y = y_mass = moment.m01 / moment.m00;
				lpad_B.z = 1;
				line(frame,mass_center, img_center, color, 4, 8 , 0);
				
				pubPosition.publish(lpad_B);
			}
			
			
			
			imshow(WINDOW, frame);

			waitKey(3);
		
			image_pub_.publish(cv_ptr->toImageMsg());
		}
	}
Example #7
void filterDetectedObjects(std_msgs::Header currentHeader, Mat& grey, Mat& thresholded, double canny_threshold, int cornerThreshold, list<DetectedObject*>& detectedObjects) {
    for (list<DetectedObject*>::const_iterator iterator = detectedObjects.begin(), end = detectedObjects.end(); iterator != end; ++iterator) {
    	DetectedObject* obj = *iterator;
    	/*if (obj->getName() == "Thin alu profile") {
    		Mat* region = extractRotatedRegion(thresholded, obj->getRotatedRect());
    		cv_bridge::CvImagePtr cv_pextracted(new cv_bridge::CvImage);
    		sensor_msgs::ImagePtr msg_extracted;

    		vector<vector<Point> > contours;
    		vector<Vec4i> hierarchy;
    		vector<int> cvxHullIndices;
    		vector<Vec4i> cvxDefects;
    		vector<vector<Point> > contours_poly(1);
    		findContours(*region, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    		if (contours.size() <= 0) {
    			continue;
    		}

    		cout << "Contour size: " << contours[0].size() << endl;

    		//approxPolyDP( Mat(contours[0]), contours_poly[0], approxPolyEpsilon, true );

    		//convexHull(contours_poly[0], cvxHullIndices, true, true);
    		//if (cvxHullIndices.size() < 4) {
    		//	continue;
    		//}
    		//convexityDefects(contours_poly[0], cvxHullIndices, cvxDefects);

    		//if (cvxDefects.size() > 2) {
    		//	obj->setName("Big screw");
    		//}

    		Mat regionColored;
    		cvtColor(*region, regionColored, CV_GRAY2BGR);
    		Scalar color (0, 255, 0);
    		drawContours(regionColored, contours_poly, 0, color);


    		cv_pextracted->image = regionColored;

			cv_pextracted->header = currentHeader;
			cv_pextracted->encoding = sensor_msgs::image_encodings::BGR8;
			msg_extracted = cv_pextracted->toImageMsg();

			pub_extracted.publish(msg_extracted);

			cv_pextracted->image.release();


    		delete region;
    	}else */if (obj->getName() == "Small cylinder") {
    		Mat* region = extractRotatedRegion(grey, obj->getRotatedRect());

    		Mat regCanny;
    		GaussianBlur(*region, regCanny, Size(3,3), 10, 10);

    		Canny(regCanny, regCanny, canny_threshold, canny_threshold);

    		cv_bridge::CvImagePtr cv_pextracted(new cv_bridge::CvImage);
    		sensor_msgs::ImagePtr msg_extracted;

    		//cout << "NonZeroCount: " << countNonZero(regCanny) << endl;
    		vector<Point2f> corners;
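    		// goodFeaturesToTrack arguments below: maxCorners=100,
    		// qualityLevel=0.3, minDistance=10 pixels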
    		goodFeaturesToTrack(regCanny, corners, 100, 0.3, 10);

    		Mat cannyColored;

    		cvtColor(regCanny, cannyColored, CV_GRAY2BGR);

    		for (size_t i=0; i<corners.size(); i++) {
    			int x = corners[i].x;
    			int y = corners[i].y;
    			cannyColored.at<Vec3b>(y,x)[0] = 0;
    			cannyColored.at<Vec3b>(y,x)[1] = 0;
    			cannyColored.at<Vec3b>(y,x)[2] = 255;
    		}

    		//cout << "CornersCount: " << corners.size() << endl;

    		if (corners.size() > static_cast<size_t>(cornerThreshold)) {
    			obj->setName("Rough small cylinder");
    		} else {
    			obj->setName("Smooth small cylinder");
    		}

    		cv_pextracted->image = cannyColored;

    		cv_pextracted->header = currentHeader;
    		cv_pextracted->encoding = sensor_msgs::image_encodings::BGR8;
            msg_extracted = cv_pextracted->toImageMsg();

            pub_extracted.publish(msg_extracted);

            cv_pextracted->image.release();
    		delete region;
    	}
    }

}
Example #8
		void image_callback(const sensor_msgs::ImageConstPtr& msg)
		{
			if(!cam_info_received) return;

			cv_bridge::CvImagePtr cv_ptr;
			try
			{
				cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
				inImage = cv_ptr->image;
				resultImg = cv_ptr->image.clone();

				//detection results will go into "markers"
				markers.clear();

				//Ok, let's detect
				double min_size = boards[0].marker_size;
				for (size_t board_index = 1; board_index < boards.size(); board_index++)
					if (min_size > boards[board_index].marker_size) min_size = boards[board_index].marker_size;
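				// detect all markers, using the smallest configured marker size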
				mDetector.detect(inImage, markers, camParam, min_size, false);


				for (size_t board_index = 0; board_index < boards.size(); board_index++)
				{
					Board board_detected;

					//Detection of the board
					float probDetect = the_board_detector.detect(markers, boards[board_index].config, board_detected, camParam, boards[board_index].marker_size);
					if (probDetect > 0.0)
					{
						tf::Transform transform = ar_sys::getTf(board_detected.Rvec, board_detected.Tvec);

						tf::StampedTransform stampedTransform(transform, ros::Time::now(), "apollon_cam_right", boards[board_index].name +"_right");

						//_tfBraodcaster.sendTransform(stampedTransform);		// from phillip

						/*geometry_msgs::PoseStamped poseMsg;
						tf::poseTFToMsg(transform, poseMsg.pose);
						poseMsg.header.frame_id = msg->header.frame_id;
						poseMsg.header.stamp = msg->header.stamp;
						pose_pub.publish(poseMsg);*/

						geometry_msgs::TransformStamped transformMsg;
						tf::transformStampedTFToMsg(stampedTransform, transformMsg);
						transform_pub.publish(transformMsg);

						/*geometry_msgs::Vector3Stamped positionMsg;
						positionMsg.header = transformMsg.header;
						positionMsg.vector = transformMsg.transform.translation;
						position_pub.publish(positionMsg);*/

						if(camParam.isValid())
						{
							//draw board axis
							CvDrawingUtils::draw3dAxis(resultImg, board_detected, camParam);
						}
					}
				}

				//for each marker, draw info and its boundaries in the image
				for(size_t i=0; draw_markers && i < markers.size(); ++i)
				{
					markers[i].draw(resultImg,cv::Scalar(0,0,255),2);
				}

				if(camParam.isValid())
				{
					//draw a 3d cube in each marker if there is 3d info
					for(size_t i=0; i<markers.size(); ++i)
					{
						if (draw_markers_cube) CvDrawingUtils::draw3dCube(resultImg, markers[i], camParam);
						if (draw_markers_axis) CvDrawingUtils::draw3dAxis(resultImg, markers[i], camParam);
					}
				}

				if(image_pub.getNumSubscribers() > 0)
				{
					//show input with augmented information
					cv_bridge::CvImage out_msg;
					out_msg.header.frame_id = msg->header.frame_id;
					out_msg.header.stamp = msg->header.stamp;
					out_msg.encoding = sensor_msgs::image_encodings::RGB8;
					out_msg.image = resultImg;
					image_pub.publish(out_msg.toImageMsg());
				}

				if(debug_pub.getNumSubscribers() > 0)
				{
					//show also the internal image resulting from the threshold operation
					cv_bridge::CvImage debug_msg;
					debug_msg.header.frame_id = msg->header.frame_id;
					debug_msg.header.stamp = msg->header.stamp;
					debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
					debug_msg.image = mDetector.getThresholdedImage();
					debug_pub.publish(debug_msg.toImageMsg());
				}
			}
			catch (cv_bridge::Exception& e)
			{
				ROS_ERROR("cv_bridge exception: %s", e.what());
				return;
			}
		}
Example #9
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::ContourArrayStamped contours_msg;
      contours_msg.header = msg->header;

      // Do the work
      cv::Mat src_gray;

      /// Convert image to gray and blur it
      if ( frame.channels() > 1 ) {
        cv::cvtColor( frame, src_gray, cv::COLOR_RGB2GRAY );
      } else {
        src_gray = frame;
      }
      cv::blur( src_gray, src_gray, cv::Size(3,3) );

      /// Create window
      if( debug_view_) {
        cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
      }

      cv::Mat threshold_output;
      int max_thresh = 255;
      std::vector<std::vector<cv::Point> > contours;
      std::vector<cv::Vec4i> hierarchy;
      cv::RNG rng(12345);

      /// Detect edges using Threshold
      cv::threshold( src_gray, threshold_output, threshold_, 255, cv::THRESH_BINARY );

      /// Find contours
      cv::findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );

      /// Find the convex hull object for each contour
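      /// (the third convexHull argument, false, requests counter-clockwise
      ///  orientation of the returned hull points)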
      std::vector<std::vector<cv::Point> >hull( contours.size() );
      for( size_t i = 0; i < contours.size(); i++ )
      {   cv::convexHull( cv::Mat(contours[i]), hull[i], false ); }

      /// Draw contours + hull results
      cv::Mat drawing = cv::Mat::zeros( threshold_output.size(), CV_8UC3 );
      for( size_t i = 0; i< contours.size(); i++ )
      {
        cv::Scalar color = cv::Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        cv::drawContours( drawing, contours, (int)i, color, 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );
        cv::drawContours( drawing, hull, (int)i, color, 4, 8, std::vector<cv::Vec4i>(), 0, cv::Point() );

        opencv_apps::Contour contour_msg;
        for ( size_t j = 0; j < hull[i].size(); j++ ) {
          opencv_apps::Point2D point_msg;
          point_msg.x = hull[i][j].x;
          point_msg.y = hull[i][j].y;
          contour_msg.points.push_back(point_msg);
        }
        contours_msg.contours.push_back(contour_msg);
      }

      /// Create a Trackbar for user to enter threshold
      if( debug_view_) {
        if (need_config_update_) {
          config_.threshold = threshold_;
          srv.updateConfig(config_);
          need_config_update_ = false;
        }
        cv::createTrackbar( "Threshold:", window_name_, &threshold_, max_thresh, trackbarCallback);

        cv::imshow( window_name_, drawing );
        int c = cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, sensor_msgs::image_encodings::BGR8, drawing).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(contours_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
Example #10
    void imageCb(const sensor_msgs::ImageConstPtr& msg) {
        cv_bridge::CvImagePtr cv_ptr;
        try {
            cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
        } catch (cv_bridge::Exception& e) {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
        // look for red pixels; turn all other pixels black, and turn red pixels white
        int npix = 0; //count the pixels
        int isum = 0; //accumulate the column values of red pixels
        int jsum = 0; //accumulate the row values of red pixels
        int redval, blueval, greenval, testval;
        cv::Vec3b rgbpix;
        for (int i = 0; i < cv_ptr->image.cols; i++) {
            for (int j = 0; j < cv_ptr->image.rows; j++) {
                rgbpix = cv_ptr->image.at<cv::Vec3b>(j, i); //[j][i];
                redval = rgbpix[2] + 1;
                blueval = rgbpix[0] + 1;
                greenval = rgbpix[1] + 1;
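                // note: integer division -- this effectively tests
                // red > g_redratio * (blue + green)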
                testval = redval / (blueval + greenval);
                //if red, paint it white:
                if (testval > g_redratio) {
                    cv_ptr->image.at<cv::Vec3b>(j, i)[0] = 255;
                    cv_ptr->image.at<cv::Vec3b>(j, i)[1] = 255;
                    cv_ptr->image.at<cv::Vec3b>(j, i)[2] = 255;
                    npix++;
                    isum += i;
                    jsum += j;
                } else { //else paint it black
                    cv_ptr->image.at<cv::Vec3b>(j, i)[0] = 0;
                    cv_ptr->image.at<cv::Vec3b>(j, i)[1] = 0;
                    cv_ptr->image.at<cv::Vec3b>(j, i)[2] = 0;
                }
            }
        }
        cout << "npix: " << npix << endl;
        //paint in a blue square at the centroid:
        int half_box = 5; // choose size of box to paint
        int i_centroid, j_centroid;
        if (npix > 0) {
            i_centroid = isum / npix;
            j_centroid = jsum / npix;
            cout << "i_avg: " << i_centroid << endl; //i,j centroid of red pixels
            cout << "j_avg: " << j_centroid << endl;
            for (int i_box = i_centroid - half_box; i_box <= i_centroid + half_box; i_box++) {
                for (int j_box = j_centroid - half_box; j_box <= j_centroid + half_box; j_box++) {
                    if ((i_box >= 0)&&(j_box >= 0)&&(i_box < cv_ptr->image.cols)&&(j_box < cv_ptr->image.rows)) {
                        cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[0] = 255;
                        cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[1] = 0;
                        cv_ptr->image.at<cv::Vec3b>(j_box, i_box)[2] = 0;
                    }
                }
            }

        }
        // Update GUI Window; this will display processed images on the open-cv viewer.
        cv::imshow(OPENCV_WINDOW, cv_ptr->image);
        cv::waitKey(3);

        // Also, publish the processed image as a ROS message on a ROS topic
        // can view this stream in ROS with: 
        //rosrun image_view image_view image:=/image_converter/output_video
        image_pub_.publish(cv_ptr->toImageMsg());

    }
Example #11
    void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
    {
        // Work on the image.
        try
        {
            // Convert the image into something opencv can handle.
            cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

            // Messages
            opencv_apps::LineArrayStamped lines_msg;
            lines_msg.header = msg->header;

            // Do the work
            cv::Mat src_gray, edges;

            if ( frame.channels() > 1 ) {
                cv::cvtColor( frame, src_gray, cv::COLOR_BGR2GRAY );
            } else {
                src_gray = frame;
            }
            /// Apply Canny edge detector
            Canny( src_gray, edges, 50, 200, 3 );

            if( debug_view_) {
                /// Create Trackbars for Thresholds
                char thresh_label[50];
                sprintf( thresh_label, "Thres: %d + input", min_threshold_ );

                cv::namedWindow( window_name_, cv::WINDOW_AUTOSIZE );
                cv::createTrackbar( thresh_label, window_name_, &threshold_, max_threshold_, trackbarCallback);
                if (need_config_update_) {
                    config_.threshold = threshold_;
                    srv.updateConfig(config_);
                    need_config_update_ = false;
                }
            }

            /// Initialize
            cv::cvtColor( edges, frame, cv::COLOR_GRAY2BGR );

            switch (config_.hough_type) {
            case hough_lines::HoughLines_Standard_Hough_Transform:
            {
                std::vector<cv::Vec2f> s_lines;

                /// 1. Use Standard Hough Transform
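                /// (rho = 1 pixel, theta = 1 degree, accumulator threshold =
                ///  min_threshold_ + threshold_ votes)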
                cv::HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold_ + threshold_, 0, 0 );

                /// Show the result
                for( size_t i = 0; i < s_lines.size(); i++ )
                {
                    float r = s_lines[i][0], t = s_lines[i][1];
                    double cos_t = cos(t), sin_t = sin(t);
                    double x0 = r*cos_t, y0 = r*sin_t;
                    double alpha = 1000;

                    cv::Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) );
                    cv::Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) );
#ifndef CV_VERSION_EPOCH
                    cv::line( frame, pt1, pt2, cv::Scalar(255,0,0), 3, cv::LINE_AA);
#else
                    cv::line( frame, pt1, pt2, cv::Scalar(255,0,0), 3, CV_AA);
#endif
                    opencv_apps::Line line_msg;
                    line_msg.pt1.x = pt1.x;
                    line_msg.pt1.y = pt1.y;
                    line_msg.pt2.x = pt2.x;
                    line_msg.pt2.y = pt2.y;
                    lines_msg.lines.push_back(line_msg);
                }

                break;
            }
            case hough_lines::HoughLines_Probabilistic_Hough_Transform:
            default:
            {
                std::vector<cv::Vec4i> p_lines;

                /// 2. Use Probabilistic Hough Transform
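                /// (same rho/theta/threshold as above, plus minLineLength = 30
                ///  and maxLineGap = 10 pixels)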
                cv::HoughLinesP( edges, p_lines, 1, CV_PI/180, min_threshold_ + threshold_, 30, 10 );

                /// Show the result
                for( size_t i = 0; i < p_lines.size(); i++ )
                {
                    cv::Vec4i l = p_lines[i];
#ifndef CV_VERSION_EPOCH
                    cv::line( frame, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255,0,0), 3, cv::LINE_AA);
#else
                    cv::line( frame, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(255,0,0), 3, CV_AA);
#endif
                    opencv_apps::Line line_msg;
                    line_msg.pt1.x = l[0];
                    line_msg.pt1.y = l[1];
                    line_msg.pt2.x = l[2];
                    line_msg.pt2.y = l[3];
                    lines_msg.lines.push_back(line_msg);
                }

                break;
            }
            }

            //-- Show what you got
            if( debug_view_) {
                cv::imshow( window_name_, frame );
                int c = cv::waitKey(1);
            }

            // Publish the image.
            sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
            img_pub_.publish(out_img);
            msg_pub_.publish(lines_msg);
        }
        catch (cv::Exception &e)
        {
            NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
        }

        prev_stamp_ = msg->header.stamp;
    }
Example #12
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    static int posX, posY, lastX, lastY; //To hold the X and Y position of the colored object

    Mat rgbCameraFrames = cv_ptr->image; //hold raw image
    Mat colorTrackingFrames; //hold color filtered frame

    assert(rgbCameraFrames.type() == CV_8UC3);
    Mat scribbleFrame = Mat::zeros(rgbCameraFrames.rows, rgbCameraFrames.cols, CV_8UC3); //zero-initialised canvas for the motion trail


    double frame_middle = rgbCameraFrames.cols / 2.0; //get middle of the frame

    //test draw
    //cv::circle(cv_ptr->image, cv::Point(frame_middle, 50), 10, CV_RGB(255,0,0));

    GaussianBlur(rgbCameraFrames, colorTrackingFrames, Size(11,11), 0, 0); //reduce the noise

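    // note: the thresholds below are in BGR channel order -- this filters
    // directly in BGR space (B <= 60, G <= 60, R >= 80) rather than in HSV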
    inRange(colorTrackingFrames, Scalar(0, 0, 80), Scalar(60, 60, 255), colorTrackingFrames); //red pixels become white, everything else black

    Moments colorMoment = moments(colorTrackingFrames);
    double moment10 = colorMoment.m10; //Sum of X coordinates of all white pixels
    double moment01 = colorMoment.m01; //Sum of Y coordinates of all white pixels
    double area = colorMoment.m00; //Sum of all white pixels

    lastX = posX;
    lastY = posY;

    if (area > 0)
    {
        posX = moment10 / area;
        posY = moment01 / area;
    }
    else
    {
        posX = -1; //no colored object visible in this frame
        posY = -1;
    }

    geometry_msgs::Twist vel_msg;

    double difference = posX - frame_middle;
    ROS_INFO("frame_middle: %f, posX: %d, diffrence: %f",frame_middle, posX, difference);

    if(posX >= frame_middle * 10 || posX <= 0)
    {
    	ROS_INFO("ROTATE");
    	vel_msg.angular.z = -1;
    }
    else if( difference <50 && difference > -50)
    {
    	ROS_INFO("FORWARD");
    	vel_msg.linear.x = 0.25;
    }
    else if(difference >= 50)
    {
    	//turn right
    	ROS_INFO("TURN RIGHT");
    	vel_msg.angular.z = -0.5;
    }
    else if(difference <= -50)
    {
    	//turn left
    	ROS_INFO("TURN LEFT");
    	vel_msg.angular.z = 0.5;
    }

    if(posX > 0 && posY > 0 && lastX > 0 && lastY > 0)
    {
    	line(scribbleFrame, Point(posX, posY), Point(lastX, lastY), Scalar(0,255,255), 1);
    	line(rgbCameraFrames, Point(posX, posY), Point(lastX, lastY), Scalar(0,255,255), 5);
    }

    cv::imshow(scribbleWindow, scribbleFrame);


/*
    // Draw an example circle on the video stream
    if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
      cv::circle(cv_ptr->image, cv::Point(50, 50), 10, CV_RGB(255,0,0));
*/
    // Update GUI Window
    cv::imshow(OPENCV_WINDOW, cv_ptr->image);
    cv::waitKey(3);

    vel_pub_.publish(vel_msg);
    // Output modified video stream
    image_pub_.publish(cv_ptr->toImageMsg());

  }
Example #13
    bool detectFiducials(cob_object_detection_msgs::DetectionArray& detection_array, cv::Mat& color_image)
    {
        int id_start_idx = 2351;
        unsigned int marker_array_size = 0;
        unsigned int pose_array_size = 0;

        // Detect fiducials
        std::vector<ipa_Fiducials::t_pose> tags_vec;
        std::vector<std::vector<double> >vec_vec7d;
        if (m_pi_tag->GetPose(color_image, tags_vec) & ipa_Utils::RET_OK)
        {
            pose_array_size = tags_vec.size();

            // TODO: Average results
            for (unsigned int i=0; i<pose_array_size; i++)
            {
                cob_object_detection_msgs::Detection fiducial_instance;
                fiducial_instance.label = "pi-tag"; //tags_vec[i].id;
                fiducial_instance.detector = "Fiducial_PI";
                fiducial_instance.score = 0;
                fiducial_instance.bounding_box_lwh.x = 0;
                fiducial_instance.bounding_box_lwh.y = 0;
                fiducial_instance.bounding_box_lwh.z = 0;

                // TODO: Set Mask
                cv::Mat frame(3,4, CV_64FC1);
                for (int k=0; k<3; k++)
                    for (int l=0; l<3; l++)
                        frame.at<double>(k,l) = tags_vec[i].rot.at<double>(k,l);
                frame.at<double>(0,3) = tags_vec[i].trans.at<double>(0,0);
                frame.at<double>(1,3) = tags_vec[i].trans.at<double>(1,0);
                frame.at<double>(2,3) = tags_vec[i].trans.at<double>(2,0);
                std::vector<double> vec7d = FrameToVec7(frame);
                vec_vec7d.push_back(vec7d);
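                // vec7d layout, as consumed below: [x, y, z, qw, qx, qy, qz]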

                // Results are given in CfromO
                fiducial_instance.pose.pose.position.x =  vec7d[0];
                fiducial_instance.pose.pose.position.y =  vec7d[1];
                fiducial_instance.pose.pose.position.z =  vec7d[2];
                fiducial_instance.pose.pose.orientation.w =  vec7d[3];
                fiducial_instance.pose.pose.orientation.x =  vec7d[4];
                fiducial_instance.pose.pose.orientation.y =  vec7d[5];
                fiducial_instance.pose.pose.orientation.z =  vec7d[6];

                fiducial_instance.pose.header.stamp = received_timestamp_;
                fiducial_instance.pose.header.frame_id = received_frame_id_;

                detection_array.detections.push_back(fiducial_instance);
                ROS_INFO("[fiducials] Detected PI-Tag '%s' at x,y,z,rw,rx,ry,rz ( %f, %f, %f, %f, %f, %f, %f ) ",
                         fiducial_instance.label.c_str(), vec7d[0], vec7d[1], vec7d[2],
                         vec7d[3], vec7d[4], vec7d[5], vec7d[6]);
            }
        }
        else
        {
            pose_array_size = 0;
        }

        // Publish 2d image
        if (publish_2d_image_)
        {
            for (unsigned int i=0; i<pose_array_size; i++)
            {
                RenderPose(color_image, tags_vec[i].rot, tags_vec[i].trans);

                cv_bridge::CvImage cv_ptr;
                cv_ptr.image = color_mat_8U3_;
                cv_ptr.encoding = CobFiducialsNode::color_image_encoding_;
                img2D_pub_.publish(cv_ptr.toImageMsg());
            }
        }

        // Publish tf
        if (publish_tf_)
        {
            for (unsigned int i=0; i<pose_array_size; i++)
            {
                // Broadcast transform of fiducial
                tf::Transform transform;
                std::stringstream tf_name;
                tf_name << "pi_tag" <<"_" << "0";
                transform.setOrigin(tf::Vector3(vec_vec7d[i][0], vec_vec7d[i][1], vec_vec7d[i][2]));
                transform.setRotation(tf::Quaternion(vec_vec7d[i][4], vec_vec7d[i][5], vec_vec7d[i][6], vec_vec7d[i][3]));
                tf_broadcaster_.sendTransform(tf::StampedTransform(transform, ros::Time::now(), received_frame_id_, tf_name.str()));
            }
        }

        // Publish marker array
        if (publish_marker_array_)
        {
            // 3 arrows for each coordinate system of each detected fiducial
            marker_array_size = 3*pose_array_size;
            if (marker_array_size >= prev_marker_array_size_)
            {
                marker_array_msg_.markers.resize(marker_array_size);
            }

            // publish a coordinate system from arrow markers for each object
            for (unsigned int i=0; i<pose_array_size; i++)
            {
                for (unsigned int j=0; j<3; j++)
                {
                    unsigned int idx = 3*i+j;
                    marker_array_msg_.markers[idx].header.frame_id = received_frame_id_;// "/" + frame_id;//"tf_name.str()";
                    marker_array_msg_.markers[idx].header.stamp = received_timestamp_;
                    marker_array_msg_.markers[idx].ns = "fiducials";
                    marker_array_msg_.markers[idx].id =  id_start_idx + idx;
                    marker_array_msg_.markers[idx].type = visualization_msgs::Marker::ARROW;
                    marker_array_msg_.markers[idx].action = visualization_msgs::Marker::ADD;
                    marker_array_msg_.markers[idx].color.a = 0.85;
                    marker_array_msg_.markers[idx].color.r = 0;
                    marker_array_msg_.markers[idx].color.g = 0;
                    marker_array_msg_.markers[idx].color.b = 0;

                    marker_array_msg_.markers[idx].points.resize(2);
                    marker_array_msg_.markers[idx].points[0].x = 0.0;
                    marker_array_msg_.markers[idx].points[0].y = 0.0;
                    marker_array_msg_.markers[idx].points[0].z = 0.0;
                    marker_array_msg_.markers[idx].points[1].x = 0.0;
                    marker_array_msg_.markers[idx].points[1].y = 0.0;
                    marker_array_msg_.markers[idx].points[1].z = 0.0;

                    if (j==0)
                    {
                        marker_array_msg_.markers[idx].points[1].x = 0.2;
                        marker_array_msg_.markers[idx].color.r = 255;
                    }
                    else if (j==1)
                    {
                        marker_array_msg_.markers[idx].points[1].y = 0.2;
                        marker_array_msg_.markers[idx].color.g = 255;
                    }
                    else if (j==2)
                    {
                        marker_array_msg_.markers[idx].points[1].z = 0.2;
                        marker_array_msg_.markers[idx].color.b = 255;
                    }

                    marker_array_msg_.markers[idx].pose.position.x = vec_vec7d[i][0];
                    marker_array_msg_.markers[idx].pose.position.y = vec_vec7d[i][1];
                    marker_array_msg_.markers[idx].pose.position.z = vec_vec7d[i][2];
                    marker_array_msg_.markers[idx].pose.orientation.x = vec_vec7d[i][4];
                    marker_array_msg_.markers[idx].pose.orientation.y = vec_vec7d[i][5];
                    marker_array_msg_.markers[idx].pose.orientation.z = vec_vec7d[i][6];
                    marker_array_msg_.markers[idx].pose.orientation.w = vec_vec7d[i][3];

                    marker_array_msg_.markers[idx].lifetime = ros::Duration(1); // 1 second
                    marker_array_msg_.markers[idx].scale.x = 0.01; // shaft diameter
                    marker_array_msg_.markers[idx].scale.y = 0.015; // head diameter
                    marker_array_msg_.markers[idx].scale.z = 0; // head length 0=default
                }

                if (prev_marker_array_size_ > marker_array_size)
                {
                    for (unsigned int i = marker_array_size; i < prev_marker_array_size_; ++i)
                    {
                        marker_array_msg_.markers[i].action = visualization_msgs::Marker::DELETE;
                    }
                }
                prev_marker_array_size_ = marker_array_size;

                fiducials_marker_array_publisher_.publish(marker_array_msg_);
            }
        } // End: publish markers

        if (tags_vec.empty())
            return false;
        return true;
    }
Example #14
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    int i,j;
    
    cv_bridge::CvImagePtr cv_ptr;
    try
      {
	cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
      }
    catch(cv_bridge::Exception& e)
      {
	ROS_ERROR("cv_bridge exception: %s", e.what());
	return;
      }
    //sensor_msgs::CvBridge bridge;
    //we need this object bridge for facilitating conversion from ros-img to opencv
    
    //IplImage* img = bridge.imgMsgToCv(msg,"rgb8");
    //image being converted from ros to opencv using cvbridge
    IplImage imgObj = cv_ptr->image;
    IplImage* img = &imgObj;

    IplImage* gray_out = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );

    cvCvtColor(img , gray_out, CV_RGB2GRAY);

     const int width = 752;
     const int height = 480;
     int MaxVal[width][3]; // per column: [0] max brightness, [1] column, [2] row
     int maks; // running maximum in the current column
     int wart; // current pixel value
     double X, Y, ZM;


	float focalLength = 748.821716;
	float baseline = 0.060234;

	float distance;
	int   disparity;
	float numerator = focalLength * baseline;
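	// stereo relation: distance = focalLength * baseline / disparity,
	// so the numerator f*B is precomputed once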

IplImage* mapa = cvCreateImage( cvSize(600, 600), IPL_DEPTH_8U, 1 ); 
	cvCircle(mapa, cvPoint(300,600), 200, cvScalar(50),1,8,0);
	cvCircle(mapa, cvPoint(300,600), 400, cvScalar(50),1,8,0);
	cvCircle(mapa, cvPoint(300,600), 150, cvScalar(50),1,8,0);
	cvCircle(mapa, cvPoint(300,600), 100, cvScalar(50),1,8,0);

// Find the brightest point in each column and store it in the MaxVal[] array



     for(i=0;i<752;i++)
    {
     maks = 0;
	for(j=60;j<440;j++)
	{
     	wart = (int)cvGetReal2D( gray_out, j, i );
	if(wart >= maks)
       {
     	maks=wart;
     	MaxVal[i][0]=maks;
     	MaxVal[i][1]=i;
     	MaxVal[i][2]=j;
        }
	}

// filters -- they look back up to three columns, so only run them
// once at least four columns have been scanned (i >= 3)
if (i >= 3)
{
if ((MaxVal[i-2][0]==0 && MaxVal[i][0]==0 && MaxVal[i-1][0]!=0)  )
{
MaxVal[i-1][0]=0;
}
if ( abs(MaxVal[i-3][0]-MaxVal[i-2][0])>40 && abs(MaxVal[i][0]-MaxVal[i-1][0])>40  )
{
MaxVal[i-1][0]=(MaxVal[i][0]+MaxVal[i-1][0])/10;
MaxVal[i-2][0]=(MaxVal[i-3][0]+MaxVal[i-2][0])/10;
}
if ( abs(MaxVal[i-2][0]-MaxVal[i-1][0])>40 && abs(MaxVal[i][0]-MaxVal[i-1][0])>40  )
{
MaxVal[i-1][0]=(MaxVal[i-1][0]+MaxVal[i-2][0])/2;
}
}
}


// Convert the column maxima to distances
     for(i=0;i<752;i++)
    {
distance = 100;
disparity = MaxVal[i][0];
if(disparity != 0)
{
distance = (numerator*255) / (disparity*64);
}
//      printf("MaxVal %d %d =%d\t distance %lf\n", i, MaxVal[i][2], MaxVal[i][0], distance);

if(distance < 2.5)
{
	X=(i-752/2)*0.006;
	ZM=pow(3*3+X*X,0.5);
	X=300+(X*distance*1000/(ZM*5));
	Y=600-(3*distance*1000/(ZM*5));

	cvSet2D(mapa, (int) Y, (int) X,cvScalar(255));
}


    } 
  // printf("MaksVal %d %d =%d\n", MaxVal[0][1], MaxVal[0][2], MaxVal[0][0]);
/*  cvNamedWindow("mapa", CV_WINDOW_AUTOSIZE); 
  cvMoveWindow("mapa", 100, 100);
  cvShowImage("mapa",mapa);*/
img=mapa;
   // cvShowImage( "Original image", gray_out);
    
    cv::waitKey(3);
    
    // convert to ros msg
    //sensor_msgs::ImagePtr out = sensor_msgs::CvBridge::cvToImgMsg(img, "rgb8");
    // image converted from opencv to ros for publishing
    //image_pub_.publish(out);

  cv_ptr->image = cv::cvarrToMat(mapa).clone(); // deep copy so the IplImages can be released
  cv_ptr->encoding = sensor_msgs::image_encodings::MONO8;

    image_pub_.publish(cv_ptr->toImageMsg());

    cvReleaseImage(&gray_out);
    cvReleaseImage(&mapa);
//sleep(1);
    //*/
  }
Example #15
void image_cb (const sensor_msgs::ImageConstPtr& image) {
    int threshold_value;
    ros::param::param<int>("/vision_threshold_binary", threshold_value, 95);
    int min_objekt_size;
    ros::param::param<int>("/vision_min_objekt_size", min_objekt_size, 4000);
    int keep_object_count;
    ros::param::param<int>("/vision_keep_object_count", keep_object_count, 4);
    int canny_threshold;
    ros::param::param<int>("/vision_canny_threshold", canny_threshold, 40);
    int corner_threshold;
    ros::param::param<int>("/vision_corner_threshold", corner_threshold, 25);
    int const max_BINARY_value = 255;
    Mat grey, thresholded, filtered;
#ifdef SHOW_IMAGE_COUNT
    static long imcount = 0;
    cout << "Image "<< ++imcount <<" recieved" << endl;
#endif
    cv_bridge::CvImagePtr cv_ptr;
    cv_bridge::CvImagePtr cv_pthresholded(new cv_bridge::CvImage());
    cv_bridge::CvImagePtr cv_pdebug(new cv_bridge::CvImage());
    sensor_msgs::ImagePtr msg_thresholded;
    sensor_msgs::ImagePtr msg_debug;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(image, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
    Mat frame = cv_ptr->image.clone();
    
    cvtColor( frame, grey, CV_BGR2GRAY ); // the frame was converted to BGR8 above
    
    int dilate_size = 3;
    Mat dilate_element = getStructuringElement( MORPH_RECT,
                                           Size( 2*dilate_size + 1, 2*dilate_size+1 ),
                                           Point( dilate_size, dilate_size ) );

    threshold( grey, thresholded, threshold_value, max_BINARY_value, THRESH_BINARY_INV );
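    // dilate followed by erode with the same element is a morphological closing
    // (fills small holes and gaps in the thresholded blobs)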
    dilate(thresholded, filtered, dilate_element);
    erode(filtered, filtered, dilate_element);
    
    try
    {
        cv_pthresholded->image = filtered;
        cv_pthresholded->header = cv_ptr->header;
        cv_pthresholded->encoding = sensor_msgs::image_encodings::MONO8;
        msg_thresholded = cv_pthresholded->toImageMsg();
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
    pub_thresholded.publish(msg_thresholded);
    
    
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    
    findContours(filtered, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    
    /// Approximate contours to polygons + get bounding rects and circles
    vector<vector<Point> > contours_poly( contours.size() );
    vector<Rect> boundRect( contours.size() );
    vector<RotatedRect> minRect( contours.size() );
    
    for( size_t i = 0; i < contours.size(); i++ )
    { 
        approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
        boundRect[i] = boundingRect( Mat(contours_poly[i]) );
        minRect[i] = minAreaRect( Mat(contours_poly[i]) );
    }
    
    size_t filteredOutBound = 0;
    size_t filteredOutRot = 0;
    vector<Rect> bigBoundRecs(boundRect.size());
    vector<RotatedRect> bigRotRecs(minRect.size());
    
    for( size_t i = 0; i < boundRect.size(); ++i ){ 
        if (boundRect[i].area() > static_cast<size_t>(min_objekt_size)) {
            bigBoundRecs[i-filteredOutBound] = boundRect[i];
        } else {
            ++filteredOutBound;
        }
    }

    for( size_t i = 0; i < minRect.size(); ++i ){
        if (getRotatedArea(minRect[i]) > min_objekt_size) {
            bigRotRecs[i-filteredOutRot] = minRect[i];
        } else {
            ++filteredOutRot;
        }
    }
    

    /// Filter the x Largest Objects (BoundRects)
    long filteredSizeBound;
    if (boundRect.size()-filteredOutBound >= static_cast<size_t>(keep_object_count)){
        filteredSizeBound = keep_object_count;
    } else {
        filteredSizeBound = boundRect.size()-filteredOutBound;
    }

    /// Filter the x Largest Objects (RotatedRects)
    long filteredSizeRot;
    if (minRect.size()-filteredOutRot >= static_cast<size_t>(keep_object_count)){
        filteredSizeRot = keep_object_count;
    } else {
        filteredSizeRot = minRect.size()-filteredOutRot;
    }

    
    
    vector<Rect> filteredRectBound(filteredSizeBound);
    for( size_t i = 0; i < bigBoundRecs.size()-filteredOutBound; ++i ){
        if( i < filteredRectBound.size()){
            filteredRectBound[i] = bigBoundRecs[i];
        } else {
            for(size_t z = 0; z < filteredRectBound.size(); ++z){
                if (sortBoundingBoxes(bigBoundRecs[i], filteredRectBound[z])){
                    filteredRectBound[z] = bigBoundRecs[i];
                    break;
                }
            }
        }
    }

    vector<RotatedRect> filteredRectRot(filteredSizeRot);
    for( size_t i = 0; i < bigRotRecs.size()-filteredOutRot; ++i ){
        if( i < filteredRectRot.size()){
            filteredRectRot[i] = bigRotRecs[i];
        } else {
            for(size_t z = 0; z < filteredRectRot.size(); ++z){
                if (sortRotatedBoxes(bigRotRecs[i], filteredRectRot[z])){
                    filteredRectRot[z] = bigRotRecs[i];
                    break;
                }
            }
        }
    }

    Rect bestBound;
    double bestRatioBound = 0;

    for( size_t i = 0; i < filteredRectBound.size(); ++i ){
        // use floating-point division; integer division would truncate the aspect ratio
        double ratio = static_cast<double>(filteredRectBound[i].height) / filteredRectBound[i].width;
        if (bestRatioBound < ratio){
            bestRatioBound = ratio;
            bestBound = filteredRectBound[i];
        }
    }

    RotatedRect bestRotated;
    double bestRatioRot = 0;

    for( size_t i = 0; i < filteredRectRot.size(); ++i ){
        double ratio = filteredRectRot[i].size.height / filteredRectRot[i].size.width;
        if (bestRatioRot < ratio){
            bestRatioRot = ratio;
            bestRotated = filteredRectRot[i];
        }
    }

    list<DetectedObject*> detectedObjects = getDetectedObjects(filteredRectRot);

    filterDetectedObjects(cv_ptr->header, grey, filtered, canny_threshold, corner_threshold, detectedObjects);

    /// Draw polygonal contour + bounding rects + circles
    Mat drawing = frame.clone();
    drawBoundingBoxes(filteredRectBound, drawing);
    drawDetectedObjects(detectedObjects, drawing);

    youbot_manipulation_vision::DetectedObjects objects;
    objects.header = cv_ptr->header;
    for (list<DetectedObject*>::const_iterator iterator = detectedObjects.begin(), end = detectedObjects.end(); iterator != end; ++iterator) {
        DetectedObject* obj = *iterator;
        RotatedRect rrect = obj->getRotatedRect();
        Rect bbox = obj->getBoundingBox();
        youbot_manipulation_vision::DetectedObject obj_msg;
        youbot_manipulation_vision::RotatedRect rrect_msg;
        youbot_manipulation_vision::BoundingBox bbox_msg;

        geometry_msgs::Point pCenter;
        pCenter.x = rrect.center.x;
        pCenter.y = rrect.center.y;

        geometry_msgs::Point pPosition;
        pPosition.x = bbox.x;
        pPosition.y = bbox.y;

        rrect_msg.centerPoint = pCenter;
        rrect_msg.width = rrect.size.width;
        rrect_msg.height = rrect.size.height;
        rrect_msg.angle = rrect.angle;

        bbox_msg.position = pPosition;
        bbox_msg.width = bbox.width;
        bbox_msg.height = bbox.height;

        obj_msg.rrect = rrect_msg;
        obj_msg.bbox = bbox_msg;
        obj_msg.object_name = obj->getName();

        objects.objects.push_back(obj_msg);
    }

    pub_objects.publish(objects);

    try
    {
        cv_pdebug->image = drawing;
        cv_pdebug->header = cv_ptr->header;
        cv_pdebug->encoding = sensor_msgs::image_encodings::BGR8;
        msg_debug = cv_pdebug->toImageMsg();
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }
    pub_debug.publish(msg_debug);
    
    
    cv_pthresholded->image.release();
    cv_pdebug->image.release();
    
    deleteDetectedObjectList(detectedObjects);
}
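The publishers pub_thresholded, pub_debug, and pub_objects used above are declared outside this excerpt. A minimal sketch of how this callback might be wired into a node follows; the node name and topic names are placeholders, not taken from the source.

// Minimal wiring sketch (assumed): globals used by image_cb plus a main() that
// subscribes it to a camera topic. Topic and node names are placeholders.
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/Image.h>
#include <youbot_manipulation_vision/DetectedObjects.h>

ros::Publisher pub_thresholded;
ros::Publisher pub_debug;
ros::Publisher pub_objects;

int main(int argc, char** argv)
{
    ros::init(argc, argv, "vision_node");
    ros::NodeHandle nh;

    pub_thresholded = nh.advertise<sensor_msgs::Image>("vision/thresholded", 1);
    pub_debug       = nh.advertise<sensor_msgs::Image>("vision/debug", 1);
    pub_objects     = nh.advertise<youbot_manipulation_vision::DetectedObjects>("vision/objects", 1);

    image_transport::ImageTransport it(nh);
    image_transport::Subscriber sub = it.subscribe("camera/image_raw", 1, image_cb);

    ros::spin();
    return 0;
}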
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (image_0_ == NULL)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
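The match() and homography() helpers called above are not part of this excerpt. The following is a plausible sketch of what they do, assuming a brute-force matcher and RANSAC-based estimation; the actual implementation may differ.

// Sketch of the two helpers used above (not shown in this excerpt); assumed
// implementation using cv::BFMatcher and cv::findHomography with RANSAC.
void match(const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
           std::vector<cv::DMatch>& matches)
{
    cv::BFMatcher matcher(cv::NORM_L2, /*crossCheck=*/true); // keep mutual best matches only
    matcher.match(descriptors_0, descriptors_1, matches);
}

void homography(const std::vector<cv::KeyPoint>& kpts_0,
                const std::vector<cv::KeyPoint>& kpts_1,
                const std::vector<cv::DMatch>& matches, cv::Mat& H)
{
    if (matches.size() < 4) return; // findHomography needs at least 4 point pairs

    std::vector<cv::Point2f> points_0, points_1;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        points_0.push_back(kpts_0[matches[i].queryIdx].pt);
        points_1.push_back(kpts_1[matches[i].trainIdx].pt);
    }
    H = cv::findHomography(points_0, points_1, CV_RANSAC);
}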
Example #17
0
    /*
     * @brief Callback (main entry point for the video processing)
     */
    void imageCb(const sensor_msgs::ImageConstPtr &msg)
    {
        cv_bridge::CvImagePtr cap;
        Mat in_img;
        Mat grayframe;
        Mat img_matches;
        Mat cap_descriptors;
        Mat H;
        Mat lastimage;

        std::vector<KeyPoint> cap_keypoint;
        std::vector<std::vector<DMatch> > matches;
        std::vector<DMatch> good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);

        //Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
        FlannBasedMatcher matcher;

        try{
            cap = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
            in_img = cap->image;
        }
        catch(cv_bridge::Exception& e){
            ROS_ERROR("cv_brige exception: %s", e.what());
            return;
        }

        if(in_img.empty()) {
            std::cerr<<"No captured frame image"<<std::endl;
            return; // img_matches would stay empty; imshow/publish below would fail
        }
        else{
            cvtColor(cap->image, grayframe, CV_BGR2GRAY);
            computeSURF(grayframe, cap_keypoint, cap_descriptors);
            //drawAKAZEKeypoint(&cap->image, cap_keypoint, Scalar(0, 0, 255));
            // corners of the reference object image (the homography below maps
            // object-image coordinates into the captured scene)
            obj_corners[0] = cvPoint( 0, 0 );
            obj_corners[1] = cvPoint( object_img.cols, 0 );
            obj_corners[2] = cvPoint( object_img.cols, object_img.rows );
            obj_corners[3] = cvPoint( 0, object_img.rows );

            std::cout<<imagenum<<std::endl;
            matcher.knnMatch(cap_descriptors, descriptors_0, matches, 2);
            for(int i = 0; i < min(descriptors_0.rows-1,(int) matches.size()); i++){
                if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)){
                    good_matches.push_back(matches[i][0]);
                }
            }

            drawMatches(grayframe, cap_keypoint, object_img, keypoint_0, good_matches, img_matches);

            if(good_matches.size() >= 4){
                for(size_t i = 0; i < good_matches.size(); i++){
                    // knnMatch was called with cap_descriptors as the query set, so
                    // queryIdx indexes cap_keypoint and trainIdx indexes keypoint_0
                    obj.push_back(keypoint_0[good_matches[i].trainIdx].pt);
                    scene.push_back(cap_keypoint[good_matches[i].queryIdx].pt);
                }

                // findHomography needs at least 4 correspondences, so keep it inside the guard
                H = findHomography(obj, scene, CV_RANSAC);
                perspectiveTransform(obj_corners, scene_corners, H);

                // grayframe is drawn on the left of img_matches, so the projected
                // corners can be drawn at their scene coordinates directly
                line( img_matches, scene_corners[0], scene_corners[1], Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[1], scene_corners[2], Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[2], scene_corners[3], Scalar( 0, 255, 0), 4 );
                line( img_matches, scene_corners[3], scene_corners[0], Scalar( 0, 255, 0), 4 );
            }



#ifdef MULTI
            std::vector<DMatch> multi_matches; // match() takes a flat vector, unlike knnMatch
            for(int i = 0; i < IMAGENUM; i++){
                matcher.match(cap_descriptors, trainDescCollection[i], multi_matches);
                drawMatches(cap->image, cap_keypoint, trainImgCollection[i], trainPointCollection[i], multi_matches, img_matches);
            }
#endif

        }

        imshow(OPENCV_WINDOW, img_matches);
        waitKey(3);
        image_pub_.publish(cap->toImageMsg());
    }
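computeSURF() is likewise defined outside this excerpt. A sketch, under the assumption that it wraps the OpenCV 2.x nonfree SURF detector (the hessian threshold here is a guess):

// Assumed helper (not in this excerpt): detect SURF keypoints and descriptors
// on a grayscale image, using the OpenCV 2.x nonfree module.
#include <opencv2/nonfree/features2d.hpp>

void computeSURF(const cv::Mat& gray, std::vector<cv::KeyPoint>& keypoints,
                 cv::Mat& descriptors)
{
    cv::SURF surf(400.0); // hessian threshold: higher -> fewer, stronger keypoints
    surf(gray, cv::Mat(), keypoints, descriptors);
}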
Example #18
0
  void image_callback(const sensor_msgs::ImageConstPtr& msg)
  {
    static tf::TransformBroadcaster br;
    if(cam_info_received)
    {
      ros::Time curr_stamp(ros::Time::now());
      cv_bridge::CvImagePtr cv_ptr;
      try
      {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
        inImage = cv_ptr->image;

        //detection results will go into "markers"
        markers.clear();
        //Ok, let's detect
        mDetector.detect(inImage, markers, camParam, marker_size, false);
        //for each marker, draw info and its boundaries in the image
        for(size_t i=0; i<markers.size(); ++i)
        {
          // only publishing the selected marker
          if(markers[i].id == marker_id)
          {
            tf::Transform transform = aruco_ros::arucoMarker2Tf(markers[i]);
            tf::StampedTransform cameraToReference;
            cameraToReference.setIdentity();

            if ( reference_frame != camera_frame )
            {
              getTransform(reference_frame,
                           camera_frame,
                           cameraToReference);
            }

            transform = 
              static_cast<tf::Transform>(cameraToReference) 
              * static_cast<tf::Transform>(rightToLeft) 
              * transform;

            tf::StampedTransform stampedTransform(transform, curr_stamp,
                                                  reference_frame, marker_frame);
            br.sendTransform(stampedTransform);
            geometry_msgs::PoseStamped poseMsg;
            tf::poseTFToMsg(transform, poseMsg.pose);
            poseMsg.header.frame_id = reference_frame;
            poseMsg.header.stamp = curr_stamp;
            pose_pub.publish(poseMsg);

            geometry_msgs::TransformStamped transformMsg;
            tf::transformStampedTFToMsg(stampedTransform, transformMsg);
            transform_pub.publish(transformMsg);

            geometry_msgs::Vector3Stamped positionMsg;
            positionMsg.header = transformMsg.header;
            positionMsg.vector = transformMsg.transform.translation;
            position_pub.publish(positionMsg);
          }
          // but drawing all the detected markers
          markers[i].draw(inImage,cv::Scalar(0,0,255),2);
        }

        //draw a 3d axis in each marker if there is 3d info
        if(camParam.isValid() && marker_size!=-1)
        {
          for(size_t i=0; i<markers.size(); ++i)
          {
            CvDrawingUtils::draw3dAxis(inImage, markers[i], camParam);
          }
        }

        if(image_pub.getNumSubscribers() > 0)
        {
          //show input with augmented information
          cv_bridge::CvImage out_msg;
          out_msg.header.stamp = curr_stamp;
          out_msg.encoding = sensor_msgs::image_encodings::RGB8;
          out_msg.image = inImage;
          image_pub.publish(out_msg.toImageMsg());
        }

        if(debug_pub.getNumSubscribers() > 0)
        {
          //show also the internal image resulting from the threshold operation
          cv_bridge::CvImage debug_msg;
          debug_msg.header.stamp = curr_stamp;
          debug_msg.encoding = sensor_msgs::image_encodings::MONO8;
          debug_msg.image = mDetector.getThresholdedImage();
          debug_pub.publish(debug_msg.toImageMsg());
        }
      }
      catch (cv_bridge::Exception& e)
      {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
      }
    }
  }
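getTransform() is declared elsewhere in this node; its likely shape, inferred from the call site, is a tf lookup along these lines (a sketch, not the confirmed implementation):

// Likely shape of getTransform, inferred from the call site: look up the
// transform from childFrame to refFrame via tf.
bool getTransform(const std::string& refFrame, const std::string& childFrame,
                  tf::StampedTransform& transform)
{
    static tf::TransformListener listener;
    try
    {
        listener.waitForTransform(refFrame, childFrame, ros::Time(0), ros::Duration(0.5));
        listener.lookupTransform(refFrame, childFrame, ros::Time(0), transform);
    }
    catch (tf::TransformException& ex)
    {
        ROS_ERROR("tf lookup failed: %s", ex.what());
        return false;
    }
    return true;
}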
Example #19
0
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    // Now, the frame is in "cv_ptr->image" 

    std::vector<Rect> faces;
    Mat frame_gray;
    Mat frame(cv_ptr->image);
    resize(frame, frame, Size(160, 120)); // downscale for speed (assumes a 640x480 source; detections are scaled back by 4 below)
    cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );

    //t = (double)cvGetTickCount();
    //-- Detect faces
    //face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(16, 12), Size(80, 60) ); // 30ms

    face_cascade.detectMultiScale( frame_gray, faces, 1.05, 2); // 35ms
    
    //t = (double)cvGetTickCount() - t;
    //printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );

    if (faces.size() > 0) {
        //myFilter((faces[0].x + faces[0].width/2) * 4 - 320, (faces[0].y + faces[0].height/2) * 4 - 240);
        //firstBodyFound = 1;
        before_filter_x = (faces[0].x + faces[0].width/2) * 4 - 320;
        before_filter_y = (faces[0].y + faces[0].height/2) * 4 - 240;
    }
    else
    {
        //myFilter(0.0, 0.0);
        before_filter_x = 0.0;
        before_filter_y = 0.0;
    }

    myFilter(before_filter_x, before_filter_y);

    gettimeofday(&tp, NULL);
    now = tp.tv_sec * 1000000 + tp.tv_usec; 
    double t = (double) (now - start_time) / 1000000;

    printf("%f    %f    %f\n", t, before_filter_x, filter_x);

    if (1) { // always-on draw/publish block (debug toggle)
        Point center(filter_x + 320.0, filter_y + 240.0);
        cv::circle(cv_ptr->image, center, 50, CV_RGB(0,255,255), 5.0);

        /*
        rectangle(cv_ptr->image,
           Point(filter_x, filter_y),
           Point(filter_x + filter_width, filter_y + filter_height),
           Scalar(0, 255, 255),
           5,
           8);

        */

        // publishing
        brray.data[0] = 0.0; // x: 0.0 (depth)
        brray.data[1] = -filter_x; // y: -320 ~ +320
        brray.data[2] = -filter_y; // z: -240 ~ +240
        brray.data[3] = 0.0;
        brray.data[4] = 0.0;

        target_xywh_pub_.publish(brray);

        // rotate the base toward the target
        geometry_msgs::TwistPtr cmd(new geometry_msgs::Twist());
        cmd->angular.z = -brray.data[1]/320 * 1.5;
        cmd_pub_.publish(cmd);
    }

    // Draw an example circle on the video stream
    //if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
    //  cv::circle(cv_ptr->image, cv::Point(50, 50), 10, CV_RGB(255,0,0));

    // Update GUI Window
    //cv::imshow(OPENCV_WINDOW, cv_ptr->image);
    //cv::waitKey(3);
    
    // Output modified video stream
    image_pub_.publish(cv_ptr->toImageMsg());
  }
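myFilter() and the filter_x / filter_y globals are defined outside this excerpt. One plausible reading, given how the raw offsets are smoothed before use, is a first-order exponential low-pass filter; the smoothing factor below is a guess.

// Assumed implementation of myFilter (not shown in this excerpt): exponential
// smoothing of the raw face offsets into the filter_x / filter_y globals.
void myFilter(double raw_x, double raw_y)
{
    const double alpha = 0.2;  // 0 < alpha <= 1; smaller = smoother, more lag
    filter_x = alpha * raw_x + (1.0 - alpha) * filter_x;
    filter_y = alpha * raw_y + (1.0 - alpha) * filter_y;
}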
Example #20
0
void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
	try
	{
		cv_bridge::CvImagePtr ppp = cv_bridge::toCvCopy(msg, enc::BGR8);
		// IplImage header borrowing ppp's pixel data; only valid within this scope
		IplImage temp = ppp->image;
		last_img = &temp;
		if (count%sampling_rate==0)
		{
			//resize the image to fit exactly tablet screen
			CvSize size = cvSize(width,heigth);
			IplImage* tmp_img = cvCreateImage(size, last_img->depth, last_img->nChannels);
			cvResize(last_img, tmp_img);
			if (head_position>(-(MAX_HEAD_POS-MIN_HEAD_POS)/2))//<
                        { 
			    if (cob_version==COB_3_6)
			    {
				    //printf("+++ rotating, head pos: %f\n",head_position);
				    IplImage* tmp_img_2 = cvCreateImage(size, last_img->depth, last_img->nChannels);
		                    CvPoint2D32f pivot = cvPoint2D32f(half_width,half_heigth);
		                    CvMat* rot_mat=cvCreateMat(2,3,CV_32FC1);
		                    cv2DRotationMatrix(pivot,180,1,rot_mat);
		                    cvWarpAffine(tmp_img,tmp_img_2,rot_mat);
				    //sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(tmp_img_2,"bgr8");
				    //pub.publish(msg);

				    ppp->image=tmp_img_2;
				    pub.publish(ppp->toImageMsg());

				    cvReleaseImage(&tmp_img_2);
			    }
			    else
			    {
				    //printf("+++ normal, head pos: %f",head_position);
				    //sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(tmp_img,"bgr8");
				    //pub.publish(msg);
	  			    ppp->image=tmp_img;
				    pub.publish(ppp->toImageMsg());
			    }
                        }
                        else
			{
				if (cob_version==COB_3_6)
	                        {
			    		//printf("+++ normal, head pos: %f",head_position);
			    		//sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(tmp_img,"bgr8");
			    		//pub.publish(msg);
  			    		ppp->image=tmp_img;
			    		pub.publish(ppp->toImageMsg());
				}
				else
				{
				    //printf("+++ rotating, head pos: %f\n",head_position);
				    IplImage* tmp_img_2 = cvCreateImage(size, last_img->depth, last_img->nChannels);
		                    CvPoint2D32f pivot = cvPoint2D32f(half_width,half_heigth);
		                    CvMat* rot_mat=cvCreateMat(2,3,CV_32FC1);
		                    cv2DRotationMatrix(pivot,180,1,rot_mat);
		                    cvWarpAffine(tmp_img,tmp_img_2,rot_mat);
				    //sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(tmp_img_2,"bgr8");
				    //pub.publish(msg);

				    ppp->image=tmp_img_2;
				    pub.publish(ppp->toImageMsg());

				    cvReleaseImage(&tmp_img_2);
				}
			}
                        cvReleaseImage(&tmp_img);
			count=0;
		}
		/*else
			printf("+\n");*/
		count++;
	}
	catch(cv_bridge::Exception& e)
	{
		ROS_ERROR("cv_bridge exception: %s", e.what());
	}
}	
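The two rotation branches above are identical. A helper along these lines (using the same globals half_width, half_heigth, and pub) would remove the duplication and also release the rotation matrix, which the original leaks on every rotated frame; this is a sketch, not part of the source.

// De-duplication sketch for the two identical rotation branches above.
void publishRotated180(cv_bridge::CvImagePtr ppp, IplImage* tmp_img, CvSize size)
{
	IplImage* rotated = cvCreateImage(size, tmp_img->depth, tmp_img->nChannels);
	CvPoint2D32f pivot = cvPoint2D32f(half_width, half_heigth);
	CvMat* rot_mat = cvCreateMat(2, 3, CV_32FC1);
	cv2DRotationMatrix(pivot, 180, 1, rot_mat);
	cvWarpAffine(tmp_img, rotated, rot_mat);

	ppp->image = rotated;               // toImageMsg() copies the pixel data
	pub.publish(ppp->toImageMsg());

	cvReleaseMat(&rot_mat);             // the original leaks this on every frame
	cvReleaseImage(&rotated);
}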
Example #21
0
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    /* Add any OpenCV processing here */

    // cv::GaussianBlur(cv_ptr->image,cv_ptr->image,cv::Size(31,31), (global_blur_width > 0) ? global_blur_width : 1);
    // Rotate the hue channel by 90 (OpenCV hue range is 0-179): every pixel is
    // shifted to the complementary hue while saturation and value are kept.
    cv::Mat image_hsv;
    cv::cvtColor(cv_ptr->image, image_hsv, CV_BGR2HSV);
    for( int y = 0; y < image_hsv.rows; y++ )
    {
      for( int x = 0; x < image_hsv.cols; x++ )
      {
        image_hsv.at<cv::Vec3b>(y,x)[0] =
           cv::saturate_cast<uchar>( (image_hsv.at<cv::Vec3b>(y,x)[0] + 90 ) % 180 );
      }
    }
    cv::cvtColor(image_hsv, cv_ptr->image, CV_HSV2BGR);
    

 /* Gray scale image */
/*    cv::Mat filtered_image;
    cv::cvtColor(cv_ptr->image,filtered_image,CV_BGR2GRAY);
    
    // Image Filtering 
    cv::Mat element5(5,5,CV_8U,cv::Scalar(1));
    cv::morphologyEx(filtered_image,filtered_image,cv::MORPH_CLOSE,element5);
    cv::morphologyEx(filtered_image,filtered_image,cv::MORPH_OPEN,element5);
*/
    
//    cv::GaussianBlur(filtered_image,filtered_image,cv::Size(5,5),3.5);

    /*
    // Sharpen image
    cv::Mat sharpen_kernel(3,3,CV_32F,cv::Scalar(0));
    sharpen_kernel.at<float>(1,1)=5.0;
    sharpen_kernel.at<float>(0,1)=-1.0;
    sharpen_kernel.at<float>(2,1)=-1.0;
    sharpen_kernel.at<float>(1,0)=-1.0;
    sharpen_kernel.at<float>(1,2)=-1.0;
    cv::filter2D(filtered_image,filtered_image,filtered_image.depth(),sharpen_kernel);
    */

    //cv::blur(filtered_image,filtered_image,cv::Size(3,3));    

    // Show final filtered image 
/*    cv::namedWindow("Filtered Image");
    cv::imshow("Filtered Image",filtered_image);

    // Finding Contours 
    cv::Mat contoured_image;
    cv::morphologyEx(filtered_image,contoured_image,cv::MORPH_GRADIENT,cv::Mat());
    cv::namedWindow("MORPH_GRADIENT contours");
    cv::imshow("MORPH_GRADIENT contours",contoured_image);

    // threshold image 
    cv::Mat threshold_image;
    cv::threshold(contoured_image,threshold_image,global_min_threshold,255,cv::THRESH_BINARY);
    cv::namedWindow("Threshold Image");
    cv::imshow("Threshold Image",threshold_image);
    cv::createTrackbar("Min Threshold:","Threshold Image",&global_min_threshold,255,update_global_min_threshold);
    update_global_min_threshold(global_min_threshold,0);
    
    // Make bounding boxes:
    //// detect edges using threshold image
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;

    cv::findContours(threshold_image,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_SIMPLE);

    //// find the rotated rectangles
    std::vector<cv::RotatedRect> minRect(contours.size());
    cv::RotatedRect new_rectangle;
    
    double length_side_a, length_side_b;
    cv::Point2f new_rect_points[4];
    

    for (int i=0; i<contours.size(); i++) {
      // only keep track of rectangles that might be cubelets... needs work
      
      new_rectangle=cv::minAreaRect(cv::Mat(contours[i]));
      new_rectangle.points(new_rect_points);
      
      std::cout<<"INFO:\t point.x = "<<new_rect_points[0].x<<std::endl;
      length_side_a=pow(new_rect_points[0].x-new_rect_points[1].x,2)+
        pow(new_rect_points[0].y-new_rect_points[1].y,2);
      length_side_b=pow(new_rect_points[0].x-new_rect_points[3].x,2)+
        pow(new_rect_points[0].y-new_rect_points[3].y,2);
      
      std::cout<<"INFO:\tsize, l_a/l_b = "<<new_rectangle.size.area()<<" ,"<<length_side_a/length_side_b<<std::endl;
      if (new_rectangle.size.area()>=500 && // minimum size requirement
          length_side_a/length_side_b>(100.0-global_squareness_ratio)/100.0 && // minimum squareness
          length_side_a/length_side_b<(100.0+global_squareness_ratio)/100.0) {
        minRect[i]=new_rectangle;
      }
    }
    
    //// Draw contours & rectangles
    cv::Mat contour_output(threshold_image.size(),CV_8U,cv::Scalar(255));
    cv::Mat bounding_boxes=cv::Mat::zeros(contour_output.size(),CV_8UC3);;
    int i=0;
    for (; i >=0; i=hierarchy[i][0]) {
      cv::Point2f rect_points[4];
      minRect[i].points(rect_points);
      cv::drawContours(bounding_boxes,contours,i,cv::Scalar(255,255,255),1,8,hierarchy,0,cv::Point());
      for (int j=0; j<4; j++) {
        cv::line(bounding_boxes,rect_points[j],rect_points[(j+1)%4],cv::Scalar(255),2,8);
      }
    }
    cv::namedWindow("bounding boxes");
    cv::imshow("bounding boxes",bounding_boxes);
    cv::createTrackbar("Out-of-square acceptance: ","bounding boxes",&global_squareness_ratio,100,update_global_squareness_ratio);
*/
    // end of processing
    cv::waitKey(3);
    
    image_pub_.publish(cv_ptr->toImageMsg());
  }
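The active per-pixel loop above rotates every hue by 90 modulo 180. The same effect can be had channel-wise with a lookup table; a sketch, assuming the same BGR8 input as the callback:

// Channel-wise sketch of the same hue rotation, avoiding the per-pixel loop:
// split HSV, remap the hue plane through a (h + 90) % 180 lookup table, merge back.
cv::Mat hsv;
cv::cvtColor(cv_ptr->image, hsv, CV_BGR2HSV);
std::vector<cv::Mat> planes;
cv::split(hsv, planes);

cv::Mat lut(1, 256, CV_8U);
for (int h = 0; h < 256; ++h)
    lut.at<uchar>(h) = static_cast<uchar>((h + 90) % 180);
cv::LUT(planes[0], lut, planes[0]);   // planes[0] is the hue channel

cv::merge(planes, hsv);
cv::cvtColor(hsv, cv_ptr->image, CV_HSV2BGR);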
Example #22
0
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    STrackedObject own,station;
    if (state == STATE_INIT) {
        robot->initCharging(chargerDetected,maxMeasurements);
        robot->movePtu(0,0);
        if (chargerDetected) state = STATE_WAIT;
        if (chargerDetected == false && fabs(ptuPan)<0.01) state = STATE_SEARCH;
    }
    if (state == STATE_UNDOCK_INIT)
    {
        robot->movePtu(275,0);
        if (robot->wait()) {
            robot->moveByDistance(-0.55);
            state = STATE_UNDOCK_MOVE;
            robot->controlHead(100,180,0);
            robot->moveHead();
        }
    }
    if ((int) state < (int)STATE_RETRY) {
        if (image->bpp != msg->step/msg->width || image->width != msg->width || image->height != msg->height) {
            delete image;
            ROS_DEBUG("Readjusting image format from %ix%i %ibpp, to %ix%i %ibpp.",image->width,image->height,image->bpp,msg->width,msg->height,msg->step/msg->width);
            image = new CRawImage(msg->width,msg->height,msg->step/msg->width);
        }
        memcpy(image->data,(void*)&msg->data[0],msg->step*msg->height);

        //search image for circles
        for (int i = 0; i<3; i++) {
            lastSegmentArray[i] = currentSegmentArray[i];
            currentSegmentArray[i] = detectorArray[i]->findSegment(image,lastSegmentArray[i]);
            objectArray[i].valid = false;
            if (currentSegmentArray[i].valid)objectArray[i] = trans->transform(currentSegmentArray[i]);
        }
        //publish the annotated result (note: this writes back into the received message buffer)
        memcpy((void*)&msg->data[0],image->data,msg->step*msg->height);
        imdebug.publish(msg);

        //is the ROBOT STATION label visible ?
        station = trans->getDock(objectArray);
        if (station.valid) {
            stationSpotted++;
            failedToSpotStationCount = 0;
            robot->controlHead(100,180/M_PI*atan2(station.y,station.x),-180/M_PI*atan2(station.z-0.2,station.x));
            switch (state) {
            case STATE_SEARCH:
                state = STATE_APPROACH;
                if (robot->approach(station,station.x)) state = STATE_ADJUST;
                break;
            case STATE_APPROACH:
                if (robot->approach(station)) {
                    state = STATE_ADJUST;
                    robot->adjust(station,station.y);
                }
                break;
            case STATE_UNDOCK_ADJUST_STEP1:
                robot->adjust(station,station.y*1.1);
                robot->halt();
                state = STATE_UNDOCK_ADJUST_STEP2;
                break;
            case STATE_UNDOCK_ADJUST_STEP2:
                if (robot->adjust(station,0.0,0.05)) {
                    robot->halt();
                    robot->measure(NULL,NULL,maxMeasurements);
                    state = STATE_UNDOCK_MEASURE1;
                }
                break;
            case STATE_UNDOCK_MEASURE1:
                own = trans->getOwnPosition(objectArray);
                if (robot->measure(&own)) {
                    robot->halt();
                    robot->measure(NULL,NULL,maxMeasurements);
                    state = STATE_UNDOCK_MEASURE2;
                }
                break;
            case STATE_UNDOCK_MEASURE2:
                own = trans->getOwnPosition(objectArray);
                if (robot->measure(&own)) {
                    robot->halt();
                    robot->movePtu(0,0);
                    sprintf(posString,"Robot position after undock: %f %f %f %f",own.x,own.y,own.z,own.yaw);
                    printf("%s\n",posString);
                    success = true;
                    robot->injectPosition(-own.z,-own.x,own.yaw);
                    state = STATE_UNDOCKING_SUCCESS;
                }
                break;
            case STATE_ADJUST:
                if (robot->adjust(station)) {
                    state = STATE_MEASURE;
                    robot->measure(NULL,NULL,maxMeasurements);
                }
                break;
            case STATE_DOCK:
                if (robot->dock(station)) {
                    state = STATE_WAIT;
                    robot->measure(NULL,NULL,4*maxMeasurements,false);
                }
                break;
            case STATE_MEASURE:
                own = trans->getOwnPosition(objectArray);
                if (robot->measure(&own)) {
                    robot->halt();
                    if (fabs(own.x) < dockingPrecision) {
                        state = STATE_DOCK;
                        robot->startProg = station.x;
                    } else {
                        state = STATE_ROTATE;
                        rotateBy = M_PI/2;
                        if (own.x > 0) rotateBy = -rotateBy;
                        rotateBy += sin((own.x+trans->ownOffset.x)/(own.z+trans->ownOffset.z));
                        robot->rotateByAngle(rotateBy);
                        robot->moveByDistance(fabs(own.x));
                    }
                }
                break;
            case STATE_CALIBRATE:
                trans->clearOffsets();
                own = trans->getOwnPosition(objectArray);
                if (robot->measure(&own,&station)) {
                    trans->updateCalibration(own,station);
                    if(trans->saveCalibration()) {
                        success = true;
                        state = STATE_CALIBRATION_SUCCESS;
                        sprintf(response,"Calibration OK.");
                    } else {
                        success = false;
                        state = STATE_CALIBRATION_FAILURE;
                        sprintf(response,"Calibration OK, but the calibration parameters were saved neither in a file nor in the STRANDS datacentre. Autonomous charging will work for now, but you will have to perform calibration again after restarting ROS or the robot.");
                    }
                }
                break;
            case STATE_WAIT:
                own = trans->getOwnPosition(objectArray);
                if (robot->wait(&own,station,chargerDetected)) {
                    if (chargerDetected) {
                        state = STATE_DOCKING_SUCCESS;
                        success = true;
                    } else {
                        state = STATE_RETRY;
                    }
                    realPrecision = sqrt(own.x*own.x+own.y*own.y);
                }
                break;
            }
        } else {
            failedToSpotStationCount++;
            if (state == STATE_SEARCH) failedToSpotStationCount=0;
        }
        if (failedToSpotStationCount > maxFailures) {
            if (state == STATE_CALIBRATE) {
                state=STATE_CALIBRATION_FAILURE;
                sprintf(response,"Cannot see the ROBOT STATION tag. Calibration failed.");
            } else if (state == STATE_UNDOCK_ADJUST_STEP1 || state == STATE_UNDOCK_MEASURE1 || state == STATE_UNDOCK_ADJUST_STEP2|| state == STATE_UNDOCK_MEASURE2) {
                sprintf(response,"Cannot see the ROBOT STATION tag. Undocking was not successfull.");
                state=STATE_UNDOCKING_FAILURE;
            } else {
                state=STATE_DOCKING_FAILURE;
            }
        }
    }
}
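For orientation, the STATE_* constants used above are defined elsewhere in the node. The check "(int)state < (int)STATE_RETRY" implies that all active states precede STATE_RETRY; the exact ordering in this reconstruction is an assumption.

// Assumed layout of the state enum (names taken from the code above,
// ordering inferred from the comparison against STATE_RETRY):
enum EState {
    // active states: image processing runs while in these
    STATE_INIT, STATE_SEARCH, STATE_APPROACH, STATE_ADJUST, STATE_ROTATE,
    STATE_MEASURE, STATE_DOCK, STATE_WAIT, STATE_CALIBRATE,
    STATE_UNDOCK_INIT, STATE_UNDOCK_MOVE,
    STATE_UNDOCK_ADJUST_STEP1, STATE_UNDOCK_ADJUST_STEP2,
    STATE_UNDOCK_MEASURE1, STATE_UNDOCK_MEASURE2,
    // recovery and terminal states
    STATE_RETRY,
    STATE_DOCKING_SUCCESS, STATE_DOCKING_FAILURE,
    STATE_UNDOCKING_SUCCESS, STATE_UNDOCKING_FAILURE,
    STATE_CALIBRATION_SUCCESS, STATE_CALIBRATION_FAILURE
};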
  void do_work(const sensor_msgs::ImageConstPtr& msg, const std::string input_frame_from_msg)
  {
    // Work on the image.
    try
    {
      // Convert the image into something opencv can handle.
      cv::Mat frame = cv_bridge::toCvShare(msg, msg->encoding)->image;

      // Messages
      opencv_apps::FaceArrayStamped faces_msg;
      faces_msg.header = msg->header;

      // Do the work
      std::vector<cv::Rect> faces;
      cv::Mat frame_gray;

      cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
      cv::equalizeHist( frame_gray, frame_gray );
      //-- Detect faces
#ifndef CV_VERSION_EPOCH
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0, cv::Size(30, 30) );
#else
      face_cascade_.detectMultiScale( frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

      for( size_t i = 0; i < faces.size(); i++ )
      {
        cv::Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
        cv::ellipse( frame,  center, cv::Size( faces[i].width/2, faces[i].height/2), 0, 0, 360, cv::Scalar( 255, 0, 255 ), 2, 8, 0 );
        opencv_apps::Face face_msg;
        face_msg.face.x = center.x;
        face_msg.face.y = center.y;
        face_msg.face.width = faces[i].width;
        face_msg.face.height = faces[i].height;

        cv::Mat faceROI = frame_gray( faces[i] );
        std::vector<cv::Rect> eyes;

        //-- In each face, detect eyes
#ifndef CV_VERSION_EPOCH
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0, cv::Size(30, 30) );
#else
        eyes_cascade_.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
#endif

        for( size_t j = 0; j < eyes.size(); j++ )
        {
          cv::Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
          int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
          cv::circle( frame, eye_center, radius, cv::Scalar( 255, 0, 0 ), 3, 8, 0 );

          opencv_apps::Rect eye_msg;
          eye_msg.x = eye_center.x;
          eye_msg.y = eye_center.y;
          eye_msg.width = eyes[j].width;
          eye_msg.height = eyes[j].height;
          face_msg.eyes.push_back(eye_msg);
        }

        faces_msg.faces.push_back(face_msg);
      }
      //-- Show what you got
      if( debug_view_) {
        cv::imshow( "Face detection", frame );
        cv::waitKey(1);
      }

      // Publish the image.
      sensor_msgs::Image::Ptr out_img = cv_bridge::CvImage(msg->header, msg->encoding,frame).toImageMsg();
      img_pub_.publish(out_img);
      msg_pub_.publish(faces_msg);
    }
    catch (cv::Exception &e)
    {
      NODELET_ERROR("Image processing error: %s %s %s %i", e.err.c_str(), e.func.c_str(), e.file.c_str(), e.line);
    }

    prev_stamp_ = msg->header.stamp;
  }
void imageCallback(const sensor_msgs::ImageConstPtr& original_image)
{
    if(is_robot_running==0){
            //Convert from the ROS image message to a CvImage suitable for working with OpenCV for processing
            cv_bridge::CvImagePtr cv_ptr;
            try
            {
                cv_ptr = cv_bridge::toCvCopy(original_image, enc::BGR8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR("tutorialROSOpenCV::main.cpp::cv_bridge exception: %s", e.what());
                return;
            }
        if(balls_written == 0){

            Mat orig_image;
            orig_image = cv_ptr->image;
            Mat src_gray, dst, dst2, dst3, dst4, abs_dst3, abs_dst4;
            Mat grad;
            /// Convert it to gray
            cvtColor( cv_ptr->image, src_gray, CV_BGR2GRAY );
            Sobel(src_gray, dst3, CV_16S, 1, 0, 3, 1, 0, BORDER_DEFAULT);
            convertScaleAbs( dst3, abs_dst3 );
            Sobel(src_gray, dst4, CV_16S, 0, 1, 3, 1, 0, BORDER_DEFAULT);
            convertScaleAbs( dst4, abs_dst4 );
            addWeighted( abs_dst3, 0.5, abs_dst4, 0.5, 0, dst );
            vector<Vec3f> circles;
            /// Apply the Hough Transform to find the circles
            HoughCircles(dst, circles, CV_HOUGH_GRADIENT, 1, dst.rows/8, 80, 40, 3, 20);
            circles_all.insert(circles_all.end(), circles.begin(), circles.end());

            if(circles.size()>0) licznik_wybrane++;
            if(circles.size()==0) no_balls_counter++;

            if(no_balls_counter==10){
                    send_no_balls();
            }

            if(licznik_wybrane==10) // we now have the full table of balls
            {
                std::cout << "Collected ball detections from 10 frames \n";
                ball_list_cond();
                balls_written = 1;
                std::cout << "Balls saved in condensed form \n";
            }
       }
        if(draw_balls==1){

            Point center_closest(cvRound(last_circle[0]), cvRound(last_circle[1]));
            int radius_closest = cvRound(last_circle[2]);

            //draw
            // circle center
            circle( cv_ptr->image, center_closest, 3, Scalar(255,255,0), -1, 8, 0 );
            // circle outline
            circle( cv_ptr->image, center_closest, radius_closest, Scalar(255,0,255), 3, 8, 0 );

            circle_pub.publish(cv_ptr->toImageMsg());

            draw_balls = 0;
        }
        //cv::waitKey(3);
   }

  }
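For reference, the HoughCircles call in this example uses OpenCV's CV_HOUGH_GRADIENT method; here is the same call with its parameters annotated per the OpenCV documentation.

// Annotated copy of the call above (same values, comments only):
HoughCircles(dst, circles, CV_HOUGH_GRADIENT,
             1,           // dp: inverse ratio of accumulator resolution (1 = input resolution)
             dst.rows/8,  // minDist: minimum distance between detected circle centers
             80,          // param1: upper threshold for the internal Canny edge detector
             40,          // param2: accumulator threshold (smaller -> more, weaker circles)
             3, 20);      // minRadius, maxRadius in pixels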
Example #25
0
  //subscriber callback function
  void callback(const sensor_msgs::ImageConstPtr& msg)
  {
	double delay = (double)getTickCount();
	double totaldelay = 0.0;

	cv_bridge::CvImagePtr cv_ptr;
	try
	{
	  cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
	}
	catch (cv_bridge::Exception& e)
	{
	  ROS_ERROR("cv_bridge exception: %s", e.what());
	  return;
	}

	// Load cascades (reloading on every callback is wasteful; see the sketch after this example)
	if( !face_cascade.load( face_cascade_name ) ){ std::cout << "Error loading face cascade" << endl; return; }
	// Leave out profile and eye detection for speed
	//if( !face_cascade.load( profile_cascade_name ) ){ std::cout << "Error loading profile cascade" << endl; };
	//if( !eyes_cascade.load( eyes_cascade_name ) ){ std::cout << "Error loading eyes cascade" << endl; };

	// Get image frame and apply classifier
	if (cv_ptr)
	{
	  // per-frame timing (frmCnt and totalT are reset on every callback,
	  // so the fps printed below is effectively per-frame)
	  long frmCnt = 0;
	  double totalT = 0.0;
	  double t = (double)getTickCount();

	  std::vector<Rect> faces;
	  Mat frame_gray;

	  cvtColor( cv_ptr->image, frame_gray, CV_BGR2GRAY );
	  equalizeHist( frame_gray, frame_gray );

	  //-- Detect faces
	  face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

	  t = ((double)getTickCount()-t)/getTickFrequency();
	  totalT += t;
	  frmCnt++;

	  for( size_t i = 0; i < faces.size(); i++ )
	  {
	    Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
	    rectangle( cv_ptr->image, Point(faces[i].x, faces[i].y),
	               Point(faces[i].x+faces[i].width, faces[i].y+faces[i].height),
	               Scalar(255,0,255), 4, 8, 0);
	    Mat faceROI = frame_gray( faces[i] );
	    // Leave out eye detection
	    /*std::vector<Rect> eyes;

	    //-- In each face, detect eyes
	    eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );

	    for( size_t j = 0; j < eyes.size(); j++ )
	    {
	      Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5,
	                    faces[i].y + eyes[j].y + eyes[j].height*0.5 );
	      int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
	      circle( cv_ptr->image, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
	    }*/
	  }
  	  //-- Show what you got
  	  imshow( OPENCV_WINDOW, cv_ptr->image );
	  cv::waitKey(3);

	  delay = ((double)getTickCount()-delay)/getTickFrequency();
	  totaldelay += delay;

	  image_pub_.publish(cv_ptr->toImageMsg());

	  cout << "fps: " << 1.0/(totalT/(double)frmCnt) << endl;
//	  cout << "OpenCV delay: " << totaldelay << endl;
//          cout << " " << endl; 
	}
  }
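As noted above, reloading the cascade on every frame dominates the callback time; the usual fix is to load it once at startup. A sketch, assuming the surrounding class (called FaceDetector here as a placeholder) owns face_cascade and face_cascade_name:

  // Sketch: load the classifier once in the constructor instead of per-callback.
  FaceDetector()
  {
    if (!face_cascade.load(face_cascade_name))
    {
      ROS_FATAL("Error loading face cascade: %s", face_cascade_name.c_str());
      ros::shutdown();
    }
  }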