void imageCallback (const sensor_msgs::ImageConstPtr & msg_ptr)
  {
    // Convert the ROS input image message to an IplImage
    try
    {
      cv_input_ = bridge_.imgMsgToCv (msg_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
      ROS_ERROR ("CvBridge Input Error");
    }

    // Convert IplImage to cv::Mat
    img_in_ = cv::Mat (cv_input_).clone ();
    // Convert Input image from BGR to HSV
    cv::cvtColor (img_in_, img_hsv_, CV_BGR2HSV);
    // Display HSV Image in HighGUI window
    cv::imshow ("hsv", img_hsv_);

    // Needed to keep the HighGUI window open and to process its GUI events
    cv::waitKey (3);

    // Convert cv::Mat to IplImage
    cv_output_ = img_hsv_;
    // Convert IplImage to ROS output image message and publish. Note that the
    // buffer still holds HSV data; "bgr8" only describes the 3-channel, 8-bit layout.
    try
    {
      image_pub_.publish (bridge_.cvToImgMsg (&cv_output_, "bgr8"));
    }
    catch (sensor_msgs::CvBridgeException error)
    {
      ROS_ERROR ("CvBridge Output error");
    }
  }
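This callback relies on surrounding node state (bridge_, image_pub_, and the image members) that the snippet does not show. A minimal sketch of that scaffolding, assuming illustrative topic names and the older sensor_msgs::CvBridge API used throughout these examples:

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/CvBridge.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

class HsvConverter
{
public:
  HsvConverter (ros::NodeHandle &nh) : it_ (nh)
  {
    image_sub_ = it_.subscribe ("image_in", 1, &HsvConverter::imageCallback, this);
    image_pub_ = it_.advertise ("image_hsv", 1);
    cv::namedWindow ("hsv");
  }

  // imageCallback as defined above

private:
  image_transport::ImageTransport it_;
  image_transport::Subscriber image_sub_;
  image_transport::Publisher image_pub_;
  sensor_msgs::CvBridge bridge_;
  IplImage *cv_input_;      // borrowed pointer returned by imgMsgToCv
  IplImage cv_output_;      // header assigned from img_hsv_ before publishing
  cv::Mat img_in_, img_hsv_;
};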
Example #2
bool find_grip_point(image_processor::ProcessBridge::Request &req,
                     image_processor::ProcessBridge::Response &res)
{
    sensor_msgs::Image image = req.image;
    sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
    sensor_msgs::CameraInfo cam_info = req.info;
    
    IplImage *cv_image = NULL;
    try
    {
        cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("find_grip_point: CvBridge conversion of the input image failed");
        return false;
    }
        
    // Crop the image to the configured region of interest
    cvSetImageROI(cv_image, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image = cvCloneImage(cv_image);
    CvPoint2D64f temp_pts[128];
    double temp_params[128];

    IplImage *output_cv_image;
    int num_pts = 0;
    int num_params = 0;

    ROS_INFO("Ready to call find_grip_point...");
    output_cv_image = find_grip_point_process(cropped_image, temp_pts, temp_params, num_pts, num_params);
        

    // Offset detected points back into full-image coordinates
    for (int i = 0; i < num_pts; i++){
        res.pts_x.push_back(temp_pts[i].x + CROP_X);
        res.pts_y.push_back(temp_pts[i].y + CROP_Y);
    }
    for (int i = 0; i < num_params; i++){
        res.params.push_back(temp_params[i]);
    }
    sensor_msgs::ImagePtr output_img_ptr;
    sensor_msgs::Image output_img;
    try
    {
        output_img_ptr = bridge_.cvToImgMsg(output_cv_image, "bgr8");
        output_img = *output_img_ptr;
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("find_grip_point: CvBridge conversion of the output image failed");
        return false;
    }
    res.image_annotated = output_img;

    return true;
}
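A minimal sketch of registering this handler as a ROS service, assuming find_grip_point and the CvBridge instance live at file scope and an illustrative node name:

#include <ros/ros.h>
#include <image_processor/ProcessBridge.h>

int main(int argc, char **argv)
{
    ros::init(argc, argv, "image_processor_node");
    ros::NodeHandle n;
    ros::ServiceServer service = n.advertiseService("find_grip_point", find_grip_point);
    ros::spin();
    return 0;
}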
Example #3
void createRequest(re_vision::SearchFor &srv, const cv::Mat &image)
{
	//IplImage *img = cvCloneImage(&IplImage(image));
	IplImage img(image);  // IplImage header sharing the cv::Mat's data (no copy)
  
	try{
		sensor_msgs::Image::Ptr ros_img_ptr = m_bridge.cvToImgMsg(&img, "passthrough");
		srv.request.Image = sensor_msgs::Image(*ros_img_ptr);
    
	}catch (sensor_msgs::CvBridgeException error){
		ROS_ERROR("error in createRequest");
	}

	srv.request.Objects.clear();
	for(unsigned int j = 0; j < m_object_list.size(); ++j)
		srv.request.Objects.push_back(m_object_list[j]);
  
	srv.request.MaxPointsPerObject = 3;
}
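A minimal usage sketch for this request builder; the client setup, service name, and the source cv::Mat are assumptions, not from the snippet:

ros::NodeHandle n;
ros::ServiceClient client = n.serviceClient<re_vision::SearchFor>("/re_vision/search_for");
re_vision::SearchFor srv;
cv::Mat image = cv::imread("scene.png");
createRequest(srv, image);
if (!client.call(srv))
    ROS_ERROR("SearchFor service call failed");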
Example #4
void FeatureTracker::image_callback(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
  //need pose data for each picture, need to publish a camera pose
  ros::Time acquisition_time = msg->header.stamp;
  geometry_msgs::PoseStamped basePose;
  geometry_msgs::PoseStamped mapPose;
  basePose.pose.orientation.w=1.0;
  ros::Duration timeout(3);
  basePose.header.frame_id="/base_link";
  mapPose.header.frame_id="/map";
  
  try {
    tf_listener_.waitForTransform("/camera_1_link", "/map", acquisition_time, timeout);
    tf_listener_.transformPose("/map", acquisition_time,basePose,"/camera_1_link",mapPose);
    printf("pose #%d %f %f %f\n",pic_number++,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  }
  catch (tf::TransformException& ex) {
    ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
    printf("[map_maker] TF exception:\n%s", ex.what());
    return;
  }
  cam_model.fromCameraInfo(info_msg);

  // printf("callback called\n");
  try
  {
    // if you want to work with color images, change from mono8 to bgr8
    if(image_rect==NULL){
      // First frame: allocate the working images and the pyramid scratch
      // buffers (sized per the cvCalcOpticalFlowPyrLK convention)
      image_rect = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      last_image = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      pyrA = cvCreateImage(cvSize(last_image->width + 8, last_image->height / 3), IPL_DEPTH_32F, 1);
      pyrB = cvCloneImage(pyrA);
      //  printf("cloned image\n");
    }
    else{
      //save the last image
      cvCopy(image_rect,last_image);
      cvCopy(bridge.imgMsgToCv(msg, "mono8"),image_rect);
      // printf("copied image\n");
    }
    if(output_image==NULL){
      output_image =cvCloneImage(image_rect);
    }
    if(eigImage==NULL){
      eigImage =cvCloneImage(image_rect);
    }
    if(tempImage==NULL){
      tempImage =cvCloneImage(image_rect);
    }
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
    return;
  }
  
  if(image_rect!=NULL) {
    cvCopy(image_rect,output_image);
    
    printf("got image\n");
    
    track_features(mapPose);
    
    //draw features on the image
    for(int i=0;i<last_feature_count;i++){
      CvPoint center=cvPoint((int)features[i].x,(int)features[i].y);
      cvCircle(output_image,center,10,cvScalar(150),2);
      
      char strbuf[10];

      int n = sprintf(strbuf, "%d", current_feature_id[i]);
      std::string text = std::string(strbuf, n);
      
      CvFont font;
      
      cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,1,1);
      
      cvPutText(output_image,text.c_str(),cvPoint(center.x,center.y+20),&font,cvScalar(255));
      
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
  //    printf("%f x  %f y  %f z\n",tempRay.x,tempRay.y,tempRay.z);
    }
    
  //  featureList[0].print();
    
    //determine error gradient
    
    int min_features=10;
    
  //  printf("ypr %f %f %f\n",yaw,pitch,roll);
    
    cv::Point3d error_sum=calc_error(min_features,0, 0, 0);
    printf("total error is : %f\n",error_sum.x);
    
    for(size_t i = 0; i < featureList.size(); i++){
      if(min_features < featureList[i].numFeatures()){
        printf("\n\n\nfeature %zu\n", i);
        printf("mean: %f %f %f\n", featureList[i].currentMean.x, featureList[i].currentMean.y, featureList[i].currentMean.z);
      }
    }
    
    
//    double error_up= calc_error(min_features,yalpha, 0, 0);
  //  printf("total up yaw error is : %f\n",error_up);
//    double error_down= calc_error(min_features,-yalpha, 0, 0);
  //  printf("total down yaw error is : %f\n",error_down);
  /*  
     
    double yaw_change=0;
    if(error_up<error_sum && error_up<error_down){
      yaw_change=yalpha;
    }else if(error_down<error_sum && error_down<error_up){
      yaw_change=-yalpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      yalpha/=2;
    }
    
    error_up=   calc_error(min_features,0,palpha, 0);
   // printf("total up pitch error is : %f\n",error_up);
    error_down=   calc_error(min_features,0,-palpha, 0);
   // printf("total down pitch error is : %f\n",error_down);
    
    double pitch_change=0;
    if(error_up<error_sum && error_up<error_down){
      pitch_change=palpha;
    }else if(error_down<error_sum && error_down<error_up){
      pitch_change=-palpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      //palpha/=2;
    }
    
    error_up=  calc_error(min_features,0,0,ralpha);
   // printf("total up roll error is : %f\n",error_up);
    
    error_down=   calc_error(min_features,0,0,-ralpha);
   // printf("total down roll error is : %f\n",error_down);
    
    double roll_change=0;
    if(error_up<error_sum && error_up<error_down){
      roll_change=ralpha;
    }else if(error_down<error_sum && error_down<error_up){
      roll_change=-ralpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      ralpha/=2;
    }
    
  //  yaw+=yaw_change;
  
  //  pitch+=pitch_change;
 
  
  //   roll+=roll_change;
    */
    
    try{
      sensor_msgs::Image output_image_cvim =*bridge.cvToImgMsg(output_image, "mono8");
      output_image_cvim.header.stamp=msg->header.stamp;
      analyzed_pub_.publish(output_image_cvim);
    }
    catch (sensor_msgs::CvBridgeException& e){
      ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
      return;
    }
    // printf("displaying image\n");
  }else{
    // printf("null image_rect\n");
  }
}
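A minimal sketch of wiring this synchronized (Image, CameraInfo) callback to a camera topic via image_transport; the topic name and tracker construction are assumptions:

#include <ros/ros.h>
#include <image_transport/image_transport.h>

ros::NodeHandle nh;
image_transport::ImageTransport it(nh);
FeatureTracker tracker;
image_transport::CameraSubscriber sub =
    it.subscribeCamera("camera_1/image_rect", 1, &FeatureTracker::image_callback, &tracker);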
Example #5
bool is_thick(image_processor::ProcessBridge::Request &req,
              image_processor::ProcessBridge::Response &res)
{
    sensor_msgs::Image image = req.image;
    sensor_msgs::Image image2 = req.image2;
    sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
    
    sensor_msgs::CameraInfo cam_info = req.info;
    
    IplImage *cv_image = NULL;
    IplImage *cv_image2 = NULL;
    try
    {
        cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("is_thick: CvBridge conversion of the first input image failed");
        return false;
    }
   
    CvPoint2D64f temp_pts[128];
    double temp_params[128];

    // Crop the first image to the configured region of interest
    cvSetImageROI(cv_image, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image = cvCloneImage(cv_image);
    cvSaveImage("/home/stephen/cropped_image1.png", cropped_image);
    sensor_msgs::ImagePtr img2_ptr(new sensor_msgs::Image(image2));
    try
    {
        cv_image2 = bridge_.imgMsgToCv(img2_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("is_thick: CvBridge conversion of the second input image failed");
        return false;
    }
    // Crop the second image to the same region of interest
    cvSetImageROI(cv_image2, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image2 = cvCloneImage(cv_image2);

    cvSaveImage("/home/stephen/cropped_image2.png", cropped_image2);
    cvReleaseImage(&cropped_image);
    cvReleaseImage(&cropped_image2);
    // Reload the crops from disk as standalone images
    IplImage* new_cropped_image = cvLoadImage("/home/stephen/cropped_image1.png");
    IplImage* new_cropped_image2 = cvLoadImage("/home/stephen/cropped_image2.png");
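    // Note: the save/reload round-trip above appears to exist only to
    // materialize the ROI crops as standalone images. A sketch of an
    // in-memory alternative, assuming that is the intent:
    //   IplImage* crop = cvCreateImage(cvSize(CROP_WIDTH, CROP_HEIGHT),
    //                                  cv_image->depth, cv_image->nChannels);
    //   cvCopy(cv_image, crop);  // cvCopy honors the ROI set on cv_image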
    IplImage *output_cv_image;
    int num_pts = 0;
    int num_params = 0;
    output_cv_image = is_thick_process(new_cropped_image, temp_pts, temp_params, num_pts, num_params, new_cropped_image2);
        

    for (int i = 0; i < num_pts; i++){
        res.pts_x.push_back(temp_pts[i].x);
        res.pts_y.push_back(temp_pts[i].y);
    }
    for (int i = 0; i < num_params; i++){
        res.params.push_back(temp_params[i]);
    }
    sensor_msgs::ImagePtr output_img_ptr;
    sensor_msgs::Image output_img;
    try
    {
        output_img_ptr = bridge_.cvToImgMsg(output_cv_image, "bgr8");
        output_img = *output_img_ptr;
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("is_thick: CvBridge conversion of the output image failed");
        return false;
    }
    res.image_annotated = output_img;

    return true;
}
Example #6
    void image_callback (const sensor_msgs::ImageConstPtr& received_image)
    {
      // If the image is not yet de-Bayered, do it here (needed for the Prosilica camera on the PR2 robot)
      if (received_image->encoding.find("bayer") != std::string::npos)
      {
        cv::Mat tmpImage;
        const std::string& raw_encoding = received_image->encoding;
        int raw_type = CV_8UC1;
        if (raw_encoding == sensor_msgs::image_encodings::BGR8 || raw_encoding == sensor_msgs::image_encodings::RGB8)
          raw_type = CV_8UC3;
        const cv::Mat raw(received_image->height, received_image->width, raw_type,
                  const_cast<uint8_t*>(&received_image->data[0]), received_image->step);
        // Convert to color BGR
        int code = 0;
        if (received_image->encoding == sensor_msgs::image_encodings::BAYER_RGGB8)
          code = CV_BayerBG2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_BGGR8)
          code = CV_BayerRG2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_GBRG8)
          code = CV_BayerGR2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_GRBG8)
          code = CV_BayerGB2BGR;
        else
        {
          ROS_ERROR("[image_proc] Unsupported encoding '%s'", received_image->encoding.c_str());
          return;
        }
        cv::cvtColor(raw, tmpImage, code);

        cv::Mat tmp2;
        cv::cvtColor(tmpImage, tmp2, CV_BGR2GRAY);
        camera_image = cvCreateImage(cvSize(800, 600), IPL_DEPTH_8U, 1);
        IplImage ti = tmp2;  // IplImage header sharing tmp2's data (no copy)
        cvSetImageROI(&ti, cvRect(300, 300, 1848, 1450));
        cvResize(&ti, camera_image);
      }
      else
      {
        camera_image = bridge.imgMsgToCv(received_image, "mono8");
      }
      printf("\n\n");
      ROS_INFO("Image received!");
      //call main processing function

      if (camera_image->width == 2)
      {
        //if (visualization_mode_ == SEQUENCES && sequence_buffer.size() == 0)
        //  return;

        ROS_INFO("Special image for end of sequence detected! Switching to SEQUENCE mode!\n");
        visualization_mode_ = SEQUENCES;

        //std::sort(sequence_buffer.begin(), sequence_buffer.end());

        visualize(camera_image, NULL, NULL);

        clear_sequence_buffer();
        return;
      }

      std::string result = process_image(camera_image);

      //image_pub_.publish(bridge.cvToImgMsg(image_roi));
      image_pub_.publish(bridge.cvToImgMsg(camera_image));
      ROS_INFO("[ODUFinder:] image published to %s", output_image_topic_.c_str());
      if (image_roi != NULL)
    	  cvReleaseImage(&image_roi);

      //msg.data = name.substr(0, name.find_last_of('.')).c_str();
      //publish the result to http://www.ros.org/wiki/cop
      if (result.length() > 0)
      {
        vision_msgs::cop_answer msg;
        vision_msgs::cop_descriptor cop_descriptor;
        vision_msgs::aposteriori_position aposteriori_position;
        msg.found_poses.push_back(aposteriori_position);
        msg.found_poses[0].models.push_back(cop_descriptor);
        //TODO: only one needed probably, ask Moritz
        msg.found_poses[0].models[0].type = "ODUFinder";
        msg.found_poses[0].models[0].sem_class = result;
        msg.found_poses[0].models[0].object_id = ++object_id;
        msg.found_poses[0].position = 0;
        template_publisher.publish(msg);
      }
    }
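All of the examples above use the old sensor_msgs::CvBridge API, which was deprecated in favor of the cv_bridge package. For reference, a minimal sketch of the equivalent input conversion with cv_bridge (a sketch under current-API assumptions, not code from the examples):

#include <ros/ros.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

void image_callback(const sensor_msgs::ImageConstPtr& msg)
{
  cv_bridge::CvImagePtr cv_ptr;
  try
  {
    // toCvCopy returns a mutable copy; use toCvShare for read-only access
    cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }
  cv::Mat image = cv_ptr->image;  // process as a cv::Mat from here on
}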