// Topic callback function.
        // Called when a new message arrives on the subscribed topics.
        void syncCallback(const sensor_msgs::Image::ConstPtr& tof_camera_xyz_data, const sensor_msgs::Image::ConstPtr& tof_camera_grey_data)
        {
            ROS_DEBUG("convert xyz_image to point_cloud");
            sensor_msgs::PointCloud pc_msg;
			// Create the point cloud message, stamped with the source image's timestamp
			pc_msg.header.stamp = tof_camera_xyz_data->header.stamp;
			pc_msg.header.frame_id = "head_tof_camera_link";
			c_xyz_image_32F3_ = cv_bridge_0_.imgMsgToCv(tof_camera_xyz_data, "passthrough");
			c_grey_image_32F1_ = cv_bridge_1_.imgMsgToCv(tof_camera_grey_data, "passthrough");
			cv::Mat cpp_xyz_image_32F3 = c_xyz_image_32F3_;
			cv::Mat cpp_grey_image_32F1 = c_grey_image_32F1_;
			
			// Copy each (x,y,z) triple into the cloud; reserve to avoid reallocations
			pc_msg.points.reserve(cpp_xyz_image_32F3.rows * cpp_xyz_image_32F3.cols);
			float* f_ptr = 0;
			for (int row = 0; row < cpp_xyz_image_32F3.rows; row++)
			{
				f_ptr = cpp_xyz_image_32F3.ptr<float>(row);
				for (int col = 0; col < cpp_xyz_image_32F3.cols; col++)
				{
					geometry_msgs::Point32 pt;
					pt.x = f_ptr[3*col + 0];
					pt.y = f_ptr[3*col + 1];
					pt.z = f_ptr[3*col + 2];
					pc_msg.points.push_back(pt); 
				}
			}
			
            topicPub_pointCloud_.publish(pc_msg);
        }
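For reference, the deprecated sensor_msgs::CvBridge / imgMsgToCv calls used throughout these examples map onto the maintained cv_bridge API. A minimal sketch, assuming the cv_bridge package is available (this is not the original author's code):

// Sketch (assumption: cv_bridge package): the same conversions without IplImage
#include <cv_bridge/cv_bridge.h>

void syncCallback(const sensor_msgs::Image::ConstPtr& xyz_msg,
                  const sensor_msgs::Image::ConstPtr& grey_msg)
{
    // toCvShare avoids a copy when the requested encoding already matches
    cv_bridge::CvImageConstPtr xyz  = cv_bridge::toCvShare(xyz_msg,  "passthrough");
    cv_bridge::CvImageConstPtr grey = cv_bridge::toCvShare(grey_msg, "passthrough");
    const cv::Mat& xyz_mat  = xyz->image;   // CV_32FC3
    const cv::Mat& grey_mat = grey->image;  // CV_32FC1
    // ... fill and publish the point cloud as above ...
}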
Example 2
	/// Callback executed when shared mode is selected.
	/// Left and right are expressed when facing the back of the camera in horizontal orientation.
	void sharedModeSrvCallback(const sensor_msgs::ImageConstPtr& right_camera_data,
			const sensor_msgs::ImageConstPtr& tof_camera_grey_data)
	{
		boost::mutex::scoped_lock lock(m_ServiceMutex);
		ROS_INFO("[all_camera_viewer] sharedModeSrvCallback");
		// Convert ROS image messages to openCV IplImages
		try
		{
			right_color_image_8U3_ = cv_bridge_0_.imgMsgToCv(right_camera_data, "passthrough");
			grey_image_32F1_ = cv_bridge_2_.imgMsgToCv(tof_camera_grey_data, "passthrough");

			cv::Mat tmp = right_color_image_8U3_;
			right_color_mat_8U3_ = tmp.clone();

			tmp = grey_image_32F1_;
			grey_mat_32F1_ = tmp.clone();
		}
		catch (sensor_msgs::CvBridgeException& e)
		{
			ROS_ERROR("[all_camera_viewer] Could not convert images with cv_bridge.");
			return; // don't use the matrices if conversion failed
		}
			
		ipa_Utils::ConvertToShowImage(grey_mat_32F1_, grey_mat_8U3_, 1);
		cv::imshow("TOF grey data", grey_mat_8U3_);

		cv::Mat right_color_8U3;
		cv::resize(right_color_mat_8U3_, right_color_8U3, cv::Size(), 0.5, 0.5);
		cv::imshow("Right color data", right_color_8U3);
		cv::waitKey(1000);
	}
  void imageCallback (const sensor_msgs::ImageConstPtr & msg_ptr)
  {
    // Convert ROS input Image message to IplImage
    try
    {
      cv_input_ = bridge_.imgMsgToCv (msg_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
      ROS_ERROR ("CvBridge Input Error");
      return; // cv_input_ would be invalid below
    }

    // Convert IplImage to cv::Mat
    img_in_ = cv::Mat (cv_input_).clone ();
    // Convert Input image from BGR to HSV
    cv::cvtColor (img_in_, img_hsv_, CV_BGR2HSV);
    // Display HSV Image in HighGUI window
    cv::imshow ("hsv", img_hsv_);

    // Needed to  keep the HighGUI window open
    cv::waitKey (3);

    // Convert cv::Mat to IplImage
    cv_output_ = img_hsv_;
    // Convert IplImage to ROS Output Image Message and Publish
    // (note: the pixels are HSV; "bgr8" below only declares an 8-bit, 3-channel layout)
    try
    {
      image_pub_.publish (bridge_.cvToImgMsg (&cv_output_, "bgr8"));
    }
    catch (sensor_msgs::CvBridgeException error)
    {
      ROS_ERROR ("CvBridge Output error");
    }
  }
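With the newer cv_bridge API the publish step needs no cv::Mat-to-IplImage conversion at all. A sketch under the same assumption (cv_bridge available; not part of the original tutorial):

// Sketch: publish a cv::Mat directly via cv_bridge::CvImage
cv_bridge::CvImage out;
out.header   = msg_ptr->header;   // preserve stamp and frame_id
out.encoding = "bgr8";            // declares 8-bit, 3-channel layout (pixels are HSV here)
out.image    = img_hsv_;
image_pub_.publish(out.toImageMsg());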
Example 4
	/// Callback is executed, when stereo mode is selected
	/// Left and right is expressed when facing the back of the camera in horizontal orientation.
	void stereoModeSrvCallback(const sensor_msgs::ImageConstPtr& left_camera_data,
			const sensor_msgs::ImageConstPtr& right_camera_data)
	{
		ROS_INFO("[all_camera_viewer] stereoModeSrvCallback");
		boost::mutex::scoped_lock lock(m_ServiceMutex);
		// Convert ROS image messages to openCV IplImages
		try
		{
			right_color_image_8U3_ = cv_bridge_0_.imgMsgToCv(right_camera_data, "passthrough");
			left_color_image_8U3_ = cv_bridge_1_.imgMsgToCv(left_camera_data, "passthrough");

			cv::Mat tmp = right_color_image_8U3_;
			right_color_mat_8U3_ = tmp.clone();

			tmp = left_color_image_8U3_;
			left_color_mat_8U3_ = tmp.clone();
		}
		catch (sensor_msgs::CvBridgeException& e)
		{
			ROS_ERROR("[all_camera_viewer] Could not convert stereo images with cv_bridge.");
			return; // don't use the matrices if conversion failed
		}
		
		cv::Mat right_color_8U3;
		cv::resize(right_color_mat_8U3_, right_color_8U3, cv::Size(), 0.5, 0.5);
		cv::imshow("Right color data", right_color_8U3);
		
		cv::Mat left_color_8U3;
		cv::resize(left_color_mat_8U3_, left_color_8U3, cv::Size(), 0.5, 0.5);
		cv::imshow("Left color data", left_color_8U3);
		cv::waitKey(1000);

		ROS_INFO("[all_camera_viewer] stereoModeSrvCallback [OK]");
	}
Example 5
bool find_grip_point(image_processor::ProcessBridge::Request  &req,
                     image_processor::ProcessBridge::Response &res)
{
    sensor_msgs::Image image = req.image;
    sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
    sensor_msgs::CameraInfo cam_info = req.info;

    IplImage *cv_image = NULL;
    try
    {
        cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[find_grip_point] CvBridge conversion of input image failed");
        return false;
    }

    // Crop image
    cvSetImageROI(cv_image, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image = cvCloneImage(cv_image);
    CvPoint2D64f temp_pts[128];
    double temp_params[128];

    IplImage *output_cv_image;
    int num_pts = 0;
    int num_params = 0;

    ROS_INFO("Ready to call find_grip_point...");
    output_cv_image = find_grip_point_process(cropped_image, temp_pts, temp_params, num_pts, num_params);

    // Shift the detected points back into full-image coordinates
    for (int i = 0; i < num_pts; i++){
        res.pts_x.push_back(temp_pts[i].x + CROP_X);
        res.pts_y.push_back(temp_pts[i].y + CROP_Y);
    }
    for (int i = 0; i < num_params; i++){
        res.params.push_back(temp_params[i]);
    }
    sensor_msgs::ImagePtr output_img_ptr;
    sensor_msgs::Image output_img;
    try
    {
        output_img_ptr = bridge_.cvToImgMsg(output_cv_image, "bgr8");
        output_img = *output_img_ptr;
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[find_grip_point] CvBridge conversion of annotated image failed");
        return false;
    }
    res.image_annotated = output_img;

    return true;
}
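The cvSetImageROI/cvCloneImage cropping pattern above has a direct C++-API equivalent. A sketch assuming the same CROP_* constants:

// Sketch: cropping with cv::Mat instead of IplImage ROIs
cv::Mat full    = cv::cvarrToMat(cv_image);  // wraps the IplImage, no copy
cv::Mat cropped = full(cv::Rect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT)).clone();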
void ImageNodelet::imageCb(const sensor_msgs::ImageConstPtr& msg)
{
  image_mutex_.lock();

  // May want to view raw bayer data, which CvBridge doesn't know about
  if (msg->encoding.find("bayer") != std::string::npos)
  {
    last_image_ = cv::Mat(msg->height, msg->width, CV_8UC1,
                          const_cast<uint8_t*>(&msg->data[0]), msg->step);
  }
  // We want to scale floating point images so that they display nicely
  else if(msg->encoding.find("F") != std::string::npos)
  {
    cv::Mat float_image_bridge = img_bridge_.imgMsgToCv(msg, "passthrough");
    cv::Mat_<float> float_image = float_image_bridge;
    float max_val = 0;
    for(int i = 0; i < float_image.rows; ++i)
    {
      for(int j = 0; j < float_image.cols; ++j)
      {
        max_val = std::max(max_val, float_image(i, j));
      }
    }

    if(max_val > 0)
    {
      float_image /= max_val;
    }
    last_image_ = float_image;
  }
  else
  {
    // Convert to OpenCV native BGR color
    try {
      last_image_ = img_bridge_.imgMsgToCv(msg, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException& e) {
      NODELET_ERROR_THROTTLE(30, "Unable to convert '%s' image to bgr8",
                             msg->encoding.c_str());
    }
  }

  // last_image_ may point to data owned by last_msg_, so we hang onto it for
  // the sake of mouseCb.
  last_msg_ = msg;

  // Must release the mutex before calling cv::imshow, or can deadlock against
  // OpenCV's window mutex.
  image_mutex_.unlock();
  if (!last_image_.empty())
    cv::imshow(window_name_, last_image_);
}
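The manual scan for the maximum float value above can be collapsed into one OpenCV call; a sketch with the same scaling behaviour:

// Sketch: cv::minMaxLoc replaces the nested max-finding loops
double min_val = 0.0, max_val = 0.0;
cv::minMaxLoc(float_image, &min_val, &max_val);
if (max_val > 0)
  float_image /= max_val;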
Example 7
bool add_sock_to_match(image_processor::ProcessBridge::Request    &req,
                 image_processor::ProcessBridge::Response &res )
{
    sensor_msgs::Image image = req.image;
    sensor_msgs::Image image2 = req.image2;
    sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));

    IplImage *cv_image;
    IplImage *cv_image2;
    try
    {
        cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[add_sock_to_match] CvBridge conversion of first image failed");
        return false;
    }

    CvPoint2D64f temp_pts[128];
    double temp_params[128];

    // Crop the first image
    cvSetImageROI(cv_image, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image = cvCloneImage(cv_image);
    cvSaveImage("/home/stephen/cropped_image1.png", cropped_image);

    sensor_msgs::ImagePtr img2_ptr(new sensor_msgs::Image(image2));
    try
    {
        cv_image2 = bridge_.imgMsgToCv(img2_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[add_sock_to_match] CvBridge conversion of second image failed");
        return false;
    }

    // Crop the second image (note: the ROI must be set on cv_image2, not cv_image)
    cvSetImageROI(cv_image2, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image2 = cvCloneImage(cv_image2);
    cvSaveImage("/home/stephen/cropped_image2.png", cropped_image2);

    cvReleaseImage(&cropped_image);
    cvReleaseImage(&cropped_image2);
    IplImage* new_cropped_image  = cvLoadImage("/home/stephen/cropped_image1.png");
    IplImage* new_cropped_image2 = cvLoadImage("/home/stephen/cropped_image2.png");
    match_detector->addImageToList(new_cropped_image, new_cropped_image2);
    res.image_annotated = req.image;

    return true;
}
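The save-to-disk/reload round-trip above is the step that materializes the crop (cvSaveImage honours the ROI). If the PNG files are not needed as a side effect, the crop can stay in memory; a sketch assuming addImageToList takes ownership as before:

// Sketch: in-memory crop, no /home/stephen/*.png round-trip
CvRect roi = cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT);
cvSetImageROI(cv_image,  roi);
cvSetImageROI(cv_image2, roi);
IplImage* crop1 = cvCreateImage(cvSize(roi.width, roi.height), cv_image->depth,  cv_image->nChannels);
IplImage* crop2 = cvCreateImage(cvSize(roi.width, roi.height), cv_image2->depth, cv_image2->nChannels);
cvCopy(cv_image,  crop1);   // cvCopy respects the source ROI
cvCopy(cv_image2, crop2);
match_detector->addImageToList(crop1, crop2);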
Example 8
bool match_socks(image_processor::MatchSocks::Request    &req,
                 image_processor::MatchSocks::Response &res )
{
    match_detector = new MatchDetector();
    int num_images = req.images1.size();
    
    for (int i = 0; i < num_images; i++){
        sensor_msgs::Image image1 = req.images1.at(i);
        sensor_msgs::Image image2 = req.images2.at(i);
        sensor_msgs::ImagePtr img_ptr1(new sensor_msgs::Image(image1));
        sensor_msgs::ImagePtr img_ptr2(new sensor_msgs::Image(image2));
        IplImage *cv_image1; 
        IplImage *cv_image2;
        try{
            cv_image1 = bridge_.imgMsgToCv(img_ptr1, "bgr8");
            cv_image2 = bridge_.imgMsgToCv(img_ptr2, "bgr8");
            match_detector->addImageToList(cv_image1,cv_image2);
        }
        catch (sensor_msgs::CvBridgeException error)
        {
                ROS_ERROR("[match_socks] CvBridge conversion failed");
                return false;
        }
        
    }
    vector<int> matches = match_detector->process();
    for(int i = 0; i < num_images; i++){
        res.matches.push_back(matches.at(i));
    }
    return true;
    
}
Example 9
void alpha_image_cb(const sensor_msgs::ImageConstPtr& msg_ptr){


    calc_and_publish_BWMask(msg_ptr->header.stamp, msg_ptr->header.frame_id);
    IplImage* cam_image = bridge.imgMsgToCv(msg_ptr);
    IplImage* cam_alpha_image = cvCreateImage(cvGetSize(cam_image), IPL_DEPTH_8U, 4);

    //b
    cvSetImageCOI(cam_alpha_image, 1);
    cvSetImageCOI(cam_image, 1);
    cvCopy(cam_image, cam_alpha_image);

    //g
    cvSetImageCOI(cam_alpha_image, 2);
    cvSetImageCOI(cam_image, 2);
    cvCopy(cam_image, cam_alpha_image);

    //r
    cvSetImageCOI(cam_alpha_image, 3);
    cvSetImageCOI(cam_image, 3);
    cvCopy(cam_image, cam_alpha_image);

    //alpha
    cvSetImageCOI(cam_alpha_image, 4);
    cvCopy(ipl_maskBW, cam_alpha_image);
    cvSetImageCOI(cam_alpha_image, 0);


    sensor_msgs::ImagePtr img_msg = sensor_msgs::CvBridge::cvToImgMsg(cam_alpha_image);
    img_msg->header = msg_ptr->header;
    image_publisher.publish(img_msg);
    cvReleaseImage(&cam_alpha_image);


}
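The four COI copies above amount to a split/merge. A C++-API sketch of the same channel assembly, assuming ipl_maskBW matches the camera image size:

// Sketch: assemble B, G, R + alpha with cv::split / cv::merge
std::vector<cv::Mat> ch;
cv::split(cv::cvarrToMat(cam_image), ch);   // B, G, R planes
ch.push_back(cv::cvarrToMat(ipl_maskBW));   // alpha plane
cv::Mat bgra;
cv::merge(ch, bgra);                        // 4-channel result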
Example 10
    /// Callback executed when a new color image and its camera info arrive.
    /// Initializes the fiducial detector from the camera intrinsics on first use.
    void colorImageCallback(const sensor_msgs::ImageConstPtr& color_camera_data,
                            const sensor_msgs::CameraInfoConstPtr& color_camera_info)
    {
        {
            boost::mutex::scoped_lock lock( mutexQ_ );

            ROS_DEBUG("[fiducials] color image callback");

            if (camera_matrix_initialized_ == false)
            {
                camera_matrix_ = cv::Mat::zeros(3,3,CV_64FC1);
                camera_matrix_.at<double>(0,0) = color_camera_info->K[0];
                camera_matrix_.at<double>(0,2) = color_camera_info->K[2];
                camera_matrix_.at<double>(1,1) = color_camera_info->K[4];
                camera_matrix_.at<double>(1,2) = color_camera_info->K[5];
                camera_matrix_.at<double>(2,2) = 1;

                ROS_INFO("[fiducials] Initializing fiducial detector with camera matrix");
                if (m_pi_tag->Init(camera_matrix_, model_directory_ + model_filename_) & ipa_Utils::RET_FAILED)
                {
                    ROS_ERROR("[fiducials] Initializing fiducial detector with camera matrix [FAILED]");
                    return;
                }

                camera_matrix_initialized_ = true;
            }

            // Receive
            color_image_8U3_ = cv_bridge_0_.imgMsgToCv(color_camera_data, "bgr8");
            received_timestamp_ = color_camera_data->header.stamp;
            received_frame_id_ = color_camera_data->header.frame_id;
            cv::Mat tmp = color_image_8U3_;
            color_mat_8U3_ = tmp.clone();

            if (ros_node_mode_ == MODE_TOPIC || ros_node_mode_ == MODE_TOPIC_AND_SERVICE)
            {
                cob_object_detection_msgs::DetectionArray detection_array;
                detectFiducials(detection_array, color_mat_8U3_);

                // Publish
                detect_fiducials_pub_.publish(detection_array);

                cv_bridge::CvImage cv_ptr;
                cv_ptr.image = color_mat_8U3_;
                cv_ptr.encoding = CobFiducialsNode::color_image_encoding_;
                img2D_pub_.publish(cv_ptr.toImageMsg());
            }

            synchronizer_received_ = true;

            // Notify waiting thread
        }
        condQ_.notify_one();
    }
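Since CameraInfo::K is a row-major 3x3 array, the element-by-element initialization above can also be written as a wrap-and-clone. A sketch (note it would additionally pick up the skew term K[1], which the original zeroes out):

// Sketch: build the intrinsic matrix directly from CameraInfo::K
cv::Mat K(3, 3, CV_64FC1, const_cast<double*>(&color_camera_info->K[0]));
camera_matrix_ = K.clone();   // deep copy; K is row-major [fx 0 cx; 0 fy cy; 0 0 1]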
Example 11
    void process_images(const sensor_msgs::ImageConstPtr& camera_msg, const sensor_msgs::ImageConstPtr& fg_objects_msg, const sensor_msgs::ImageConstPtr& saliency_msg)
    {
        ROS_INFO_STREAM("Raw image time:          " << camera_msg->header.stamp);
        ROS_INFO_STREAM("Foreground objects time: " << fg_objects_msg->header.stamp);
        ROS_INFO_STREAM("Saliency time:           " << saliency_msg->header.stamp);

        cv::Mat camera_img(camera_bridge.imgMsgToCv(camera_msg));
        cv::Mat fg_objects_img(fg_objects_bridge.imgMsgToCv(fg_objects_msg));
        cv::Mat saliency_img(saliency_bridge.imgMsgToCv(saliency_msg));

        // Map the foreground-objects response x to a probability with a
        // per-pixel logistic: p = 1 / (1 + exp(w*x + b)), w = -1/5, b = 4.
        double w = -1/5.0, b = 4.0;
        cv::Mat fg_prob_img = fg_objects_img;                           // shallow copy: the in-place ops below reuse the bridged buffer
        fg_prob_img.convertTo(fg_prob_img, fg_prob_img.type(), w, b);   // w*x + b
        cv::exp(fg_prob_img, fg_prob_img);                              // exp(w*x + b)
        fg_prob_img.convertTo(fg_prob_img, fg_prob_img.type(), 1, 1);   // 1 + exp(w*x + b)
        cv::divide(1.0, fg_prob_img, fg_prob_img);                      // 1 / (1 + exp(w*x + b))

        cv::namedWindow("fg_prob_img");
        cv::imshow("fg_prob_img", fg_prob_img);

        double sum = cv::sum(fg_prob_img)[0];
    }
 void image_cb(const sensor_msgs::ImageConstPtr &msg)
 {
   //ROS_INFO("checking for pancakes");
   IplImage *cv_image = NULL;
   try
   {
     cv_image = bridge.imgMsgToCv(msg, "mono8");
   }
   catch (sensor_msgs::CvBridgeException error)
   {
     ROS_ERROR("bridge error");
     return;
   }
   detector.process_image(cv_image);
 }
Example 13
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  cvZero(edges);
  IplImage *img_base = bridge.imgMsgToCv(msg, "mono8"),
  *eigI = cvCreateImage(cvSize(640,480), 32, 1),
  *tempI = cvCreateImage(cvSize(640,480), 32, 1);

  cvGoodFeaturesToTrack(img_base, eigI, tempI, corners, &corner_count, quality_level, min_distance);

  // Draw only the corners actually found (corner_count is updated by the call above)
  for (int i = 0; i < corner_count; i++)
  {
	cvCircle(img_base, cvPoint((int)corners[i].x,(int)corners[i].y), 4, cvScalar(255), 1);
  }

  //send_array();
  cvShowImage("Lines",img_base);
  cvReleaseImage(&eigI); cvReleaseImage(&tempI);
}
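A C++-API sketch of the same detector, which manages its own scratch buffers and output vector (max_corners stands in for whatever capacity the global corners array had):

// Sketch: cv::goodFeaturesToTrack, no eigen/temp images or fixed-size array
std::vector<cv::Point2f> pts;
cv::Mat img = cv::cvarrToMat(img_base);
cv::goodFeaturesToTrack(img, pts, max_corners, quality_level, min_distance);
for (size_t k = 0; k < pts.size(); ++k)
  cv::circle(img, pts[k], 4, cv::Scalar(255), 1);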
Example 14
void createRequest(re_vision::SearchFor &srv, const cv::Mat &image)
{
	//IplImage *img = cvCloneImage(&IplImage(image));
	IplImage img(image);
  
	try{
		sensor_msgs::Image::Ptr ros_img_ptr = m_bridge.cvToImgMsg(&img, "passthrough");
		srv.request.Image = sensor_msgs::Image(*ros_img_ptr);
    
	}catch (sensor_msgs::CvBridgeException error){
		ROS_ERROR("error in createRequest");
	}

	srv.request.Objects.clear();
	for(unsigned int j = 0; j < m_object_list.size(); ++j)
		srv.request.Objects.push_back(m_object_list[j]);
  
	srv.request.MaxPointsPerObject = 3;
}
Example 15
// This is called whenever the node gets a new image from the camera
void DemoNode::imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  // Convert image from ROS format to OpenCV format
  static sensor_msgs::CvBridge bridge;
  static bool init = false;
  static vector<Point2f> viewCorners,foundPoints;
  cv::Mat image;
  try
  {
	image = cv::Mat(bridge.imgMsgToCv(msg, "bgr8"));
	if(!init){
		// Store the corners of the image for transformation purposes (only once)
		viewCorners.push_back(Point2f(0,0));
		viewCorners.push_back(Point2f(0,image.size().width-1));
		viewCorners.push_back(Point2f(image.size().height-1,image.size().width-1));
		viewCorners.push_back(Point2f(image.size().height-1,0));
		init = true;
	}
	
	transformPoints(viewCorners,viewCloud);
	pub_view_pts.publish(viewCloud);
	
	// Detect orange points in the image
	foundPoints.clear();
	findPoints(image,foundPoints);
	if(foundPoints.size() > 1){
		// Transform and publish points if you got any
		transformPoints(foundPoints,pointCloud);
		pub_nav_pts.publish(pointCloud);
	}
	else{
		//ROS_INFO("Camera: nothing detected");
	}
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'bgr8'. E was %s", msg->encoding.c_str(), e.what());
  }

}
	/// Topic callback function.
	/// Called when a new message arrives on the topic.
	/// @param grey_image_msg The grey values of the point cloud, stored in a 32-bit, 1-channel OpenCV IplImage
	void greyImageCallback(const sensor_msgs::ImageConstPtr& grey_image_msg)
	{
		/// Do not release <code>grey_image_32F1_</code>:
		/// image allocation is managed by the CvBridge object

		try
		{
			grey_image_32F1_ = cv_bridge_0_.imgMsgToCv(grey_image_msg, "passthrough");

			if (grey_image_8U3_ == 0)
			{
				grey_image_8U3_ = cvCreateImage(cvGetSize(grey_image_32F1_), IPL_DEPTH_8U, 3);
			}

			ipa_Utils::ConvertToShowImage(grey_image_32F1_, grey_image_8U3_, 1, 0, 700);
			cvShowImage("gray data", grey_image_8U3_);
		}
		catch (sensor_msgs::CvBridgeException& e)
		{
			ROS_ERROR("[tof_camera_viewer] Could not convert from '%s' to '32FC1'.", grey_image_msg->encoding.c_str());
		}
	}
Example 17
	/// Topic callback function.
	/// Called when a new message arrives on the topic.
	/// @param grey_image_msg The grey values of the point cloud, stored in a 32-bit, 1-channel OpenCV IplImage
	void greyImageCallback(const sensor_msgs::ImageConstPtr& grey_image_msg)
	{
		/// Do not release <code>grey_image_32F1_</code>:
		/// image allocation is managed by the CvBridge object
		ROS_INFO("Grey Image Callback");

		try
		{
			grey_image_32F1_ = cv_bridge_0_.imgMsgToCv(grey_image_msg, "passthrough");

			if (grey_image_8U3_ == 0)
			{
				grey_image_8U3_ = cvCreateImage(cvGetSize(grey_image_32F1_), IPL_DEPTH_8U, 3);
			}

			ipa_Utils::ConvertToShowImage(grey_image_32F1_, grey_image_8U3_, 1, 0, 800);
			cvShowImage("grey data", grey_image_8U3_);
			usleep(100);
			int c = cvWaitKey(50);
			if (c=='s' || c==536871027) // 's', possibly with HighGUI's high modifier bits set (0x20000073)
			{
				std::stringstream ss;
				char counterBuffer [50];
				sprintf(counterBuffer, "%04d", grey_image_counter_);
				ss << "greyImage8U3_";
				ss << counterBuffer;
				ss << ".bmp";
				cvSaveImage(ss.str().c_str(),grey_image_8U3_);
				std::cout << "Image " << grey_image_counter_ << " saved." << std::endl;
				grey_image_counter_++;
			}
		}
		catch (sensor_msgs::CvBridgeException& e)
		{
			ROS_ERROR("[tof_camera_viewer] Could not convert from '%s' to '32FC1'.", grey_image_msg->encoding.c_str());
		}
		ROS_INFO("Image Processed");
	}
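A sketch of the same zero-padded filename without the char buffer and sprintf (assumes <iomanip> is included):

// Sketch: std::setfill/std::setw replace the sprintf buffer
std::stringstream ss;
ss << "greyImage8U3_" << std::setfill('0') << std::setw(4)
   << grey_image_counter_ << ".bmp";
cvSaveImage(ss.str().c_str(), grey_image_8U3_);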
Example 18
	/// Topic callback function.
	/// Called when a new message arrives on the topic.
	/// @param xyz_image_msg The point cloud, stored in a 32-bit, 3-channel OpenCV IplImage
	void xyzImageCallback(const sensor_msgs::ImageConstPtr& xyz_image_msg)
	{
		/// Do not release <code>xyz_image_32F3_</code>:
		/// image allocation is managed by the CvBridge object

		try
		{
			xyz_image_32F3_ = cv_bridge_1_.imgMsgToCv(xyz_image_msg, "passthrough");
			
			if (xyz_image_8U3_ == 0)
			{
				xyz_image_8U3_ = cvCreateImage(cvGetSize(xyz_image_32F3_), IPL_DEPTH_8U, 3);
			}

			ipa_Utils::ConvertToShowImage(xyz_image_32F3_, xyz_image_8U3_, 3);
			cvShowImage("z data", xyz_image_8U3_);
		}
		catch (sensor_msgs::CvBridgeException& e)
		{
			ROS_ERROR("[tof_camera_viewer] Could not convert from '%s' to '32FC3'.", xyz_image_msg->encoding.c_str());
		}

	}
Example 19
void ImageGrabber::topicImageRawGot(const sensor_msgs::Image::ConstPtr& msg)
{
  ROS_WARN("/image_raw got");

  static int i = -1;
  i++;

  re_vision::SearchFor srv;

  IplImage *img_ipl = m_bridge.imgMsgToCv(msg, "passthrough");
  cv::Mat image = cv::Mat(img_ipl).clone();

  createRequest(srv, image);

  stringstream ss;
  for(unsigned int k = 0; k < srv.request.Objects.size(); ++k)
  {
    ss << srv.request.Objects[k] << " ";
  }
  ROS_INFO("Sending request to find object(s): %s", ss.str().c_str());

  if (m_client.call(srv))
  {
    ROS_INFO("Request %d made ok", i);
    showResponse(srv);

    //if(srv.response.state.state == re_msgs::State::READY){
    storeResponse(srv, image, i);
    //}
  }
  else
  {
    ROS_ERROR("Failed to call service re_vision::SearchFor (%d)", i);
  }
}
Example 20
void MapMaker::image_callback(const sensor_msgs::ImageConstPtr& msg) {
//  printf("callback called\n");
  try
	{
	
	// if you want to work with color images, change from mono8 to bgr8
	  if(input_image==NULL){
		  input_image = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
		  rotationImage=cvCloneImage(input_image);
		 // printf("cloned image\n");
		}
		else{
		  cvCopy(bridge.imgMsgToCv(msg, "mono8"),input_image);
		 // printf("copied image\n");
		}
	}
	catch (sensor_msgs::CvBridgeException& e)
	{
		ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
		return;
	}
	
	if(input_image!=NULL) {
    //get tf transform here and put in map
    ros::Time acquisition_time = msg->header.stamp;
    geometry_msgs::PoseStamped basePose;
    geometry_msgs::PoseStamped mapPose;
    basePose.pose.orientation.w=1.0;
    ros::Duration timeout(3);
    basePose.header.frame_id="/base_link";
    mapPose.header.frame_id="/map";
    try {
      tf_listener_.waitForTransform("/base_link", "/map", acquisition_time, timeout);
       
      tf_listener_.transformPose("/map", acquisition_time,basePose,"/base_link",mapPose);
	    
	    printf("pose #%d %f %f %f\n",pic_number,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
	    
	    
	    /*
	    char buffer [50];
	    sprintf (buffer, "/tmp/test%02d.jpg", pic_number);
			if(!cvSaveImage(buffer,input_image,0)) printf("Could not save: %s\n",buffer);
			else printf("picture taken!!!\n");
	    pic_number++;
	    */
	    
	    cv::Point_<double> center;
      center.x=input_image->width/2;
      center.y=input_image->height/2;
      double translation_arr[2][3];
      CvMat translation;
      cvInitMatHeader(&translation,2,3,CV_64F,translation_arr);
      
      cvSetZero(&translation);
      cv2DRotationMatrix(center, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90,1.0,&translation);
      cvSetZero(rotationImage);
      cvWarpAffine(input_image,rotationImage,&translation,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0));
      
      
      CvRect roi;
      roi.width=rotationImage->width;
      roi.height=rotationImage->height;
      
      if(init_zero_x==0){
        init_zero_x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel));
        init_zero_y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel));
      }
      
      roi.x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel))-init_zero_x+map_zero_x-roi.width/2;
      roi.y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel))-init_zero_y+map_zero_y-roi.height/2;
      
      printf("x %d, y %d, rot %f\n",roi.x,roi.y, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90);
      
      cvSetImageROI(map,roi);
      
      cvMax(map,rotationImage,map);
      
      cvResetImageROI(map);
	    cvShowImage("map image",map);	    
    }
    catch (tf::TransformException& ex) {
      ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
      printf("[map_maker] TF exception:\n%s", ex.what());
      return;
    }
    catch(...){
      ROS_ERROR("[map_maker] OpenCV exception; the image ROI probably falls outside the map");
    }
  }
}
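The rotation step has a compact C++-API form. A sketch under the same variable names, with CV_PI replacing the hard-coded 3.14159:

// Sketch: rotate about the image center with the C++ API
cv::Point2f c(input_image->width / 2.0f, input_image->height / 2.0f);
cv::Mat rot = cv::getRotationMatrix2D(
    c, tf::getYaw(mapPose.pose.orientation) * 180.0 / CV_PI - 90.0, 1.0);
cv::Mat src = cv::cvarrToMat(input_image), dst;
cv::warpAffine(src, dst, rot, src.size(),
               cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar::all(0));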
Example 21
  void imageCb(const sensor_msgs::ImageConstPtr& l_image,
               const sensor_msgs::CameraInfoConstPtr& l_cam_info,
               const sensor_msgs::ImageConstPtr& r_image,
               const sensor_msgs::CameraInfoConstPtr& r_cam_info)
  {
    ROS_INFO("In callback, seq = %u", l_cam_info->header.seq);
    
    // Convert ROS messages for use with OpenCV
    cv::Mat left, right;
    try {
      left  = l_bridge_.imgMsgToCv(l_image, "mono8");
      right = r_bridge_.imgMsgToCv(r_image, "mono8");
    }
    catch (sensor_msgs::CvBridgeException& e) {
      ROS_ERROR("Conversion error: %s", e.what());
      return;
    }
    cam_model_.fromCameraInfo(l_cam_info, r_cam_info);

    frame_common::CamParams cam_params;
    cam_params.fx = cam_model_.left().fx();
    cam_params.fy = cam_model_.left().fy();
    cam_params.cx = cam_model_.left().cx();
    cam_params.cy = cam_model_.left().cy();
    cam_params.tx = cam_model_.baseline();

    if (vslam_system_.addFrame(cam_params, left, right)) {
      /// @todo Not rely on broken encapsulation of VslamSystem here
      int size = vslam_system_.sba_.nodes.size();
      if (size % 2 == 0) {
        // publish markers
        sba::drawGraph(vslam_system_.sba_, cam_marker_pub_, point_marker_pub_);
      }

      // Publish VO tracks
      if (vo_tracks_pub_.getNumSubscribers() > 0) {
        frame_common::drawVOtracks(left, vslam_system_.vo_.frames, vo_display_);
        IplImage ipl = vo_display_;
        sensor_msgs::ImagePtr msg = sensor_msgs::CvBridge::cvToImgMsg(&ipl);
        msg->header = l_cam_info->header;
        vo_tracks_pub_.publish(msg, l_cam_info);
      }
      
      // Refine large-scale SBA.
      const int LARGE_SBA_INTERVAL = 10;
      if (size > 4 && size % LARGE_SBA_INTERVAL == 0) {
        ROS_INFO("Running large SBA on %d nodes", size);
        vslam_system_.refine();
      }
      
      if (pointcloud_pub_.getNumSubscribers() > 0 && size % 2 == 0)
        publishRegisteredPointclouds(vslam_system_.sba_, vslam_system_.frames_, pointcloud_pub_);
      
      // Publish odometry data to tf.
      if (0) // TODO: Change this to parameter.
      {
        ros::Time stamp = l_cam_info->header.stamp;
        std::string image_frame = l_cam_info->header.frame_id;
        Eigen::Vector4d trans = -vslam_system_.sba_.nodes.back().trans;
        Eigen::Quaterniond rot = vslam_system_.sba_.nodes.back().qrot.conjugate();
        
        trans.head<3>() = rot.toRotationMatrix()*trans.head<3>(); 
        
        tf_transform_.setOrigin(tf::Vector3(trans(0), trans(1), trans(2)));
        tf_transform_.setRotation(tf::Quaternion(rot.x(), rot.y(), rot.z(), rot.w()) );
        
        tf::Transform simple_transform;
        simple_transform.setOrigin(tf::Vector3(0, 0, 0));
        simple_transform.setRotation(tf::Quaternion(.5, -.5, .5, .5));
        
        tf_broadcast_.sendTransform(tf::StampedTransform(tf_transform_, stamp, image_frame, "visual_odom"));
        tf_broadcast_.sendTransform(tf::StampedTransform(simple_transform, stamp, "visual_odom", "pgraph"));
      
      
        // Publish odometry data on topic.
        if (odom_pub_.getNumSubscribers() > 0)
        {
          tf::StampedTransform base_to_image;
          tf::Transform base_to_visodom;
         
          try
          {
            tf_listener_.lookupTransform(image_frame, "/base_footprint",
                                 stamp, base_to_image);
          }
          catch (tf::TransformException ex)
          {
              ROS_WARN("%s",ex.what());
              return;
          }
                                 
          base_to_visodom = tf_transform_.inverse() * base_to_image;
          
          geometry_msgs::PoseStamped pose;
          nav_msgs::Odometry odom;
          pose.header.frame_id = "/visual_odom";
          pose.pose.position.x = base_to_visodom.getOrigin().x();
          pose.pose.position.y = base_to_visodom.getOrigin().y();
          pose.pose.position.z = base_to_visodom.getOrigin().z();
          pose.pose.orientation.x = base_to_visodom.getRotation().x();
          pose.pose.orientation.y = base_to_visodom.getRotation().y();
          pose.pose.orientation.z = base_to_visodom.getRotation().z();
          pose.pose.orientation.w = base_to_visodom.getRotation().w();
          
          odom.header.stamp = stamp;
          odom.header.frame_id = "/visual_odom";
          odom.child_frame_id = "/base_footprint";
          odom.pose.pose = pose.pose;
          /* odom.pose.covariance[0] = 1;
          odom.pose.covariance[7] = 1;
          odom.pose.covariance[14] = 1;
          odom.pose.covariance[21] = 1;
          odom.pose.covariance[28] = 1;
          odom.pose.covariance[35] = 1; */
          odom_pub_.publish(odom);
        }
      }
    }
  }
    /////////////////////////////////////////////////////////////////////
    // Image callback
    void imageCBAll(const sensor_msgs::Image::ConstPtr &left, const sensor_msgs::CameraInfo::ConstPtr &left_info, const sensor_msgs::Image::ConstPtr &disp, const sensor_msgs::CameraInfo::ConstPtr &right_info)
    {
      // Convert the image to OpenCV
      IplImage *cv_image_left = NULL;
      try
      {
	cv_image_left = lbridge_.imgMsgToCv(left,"mono8");
      }
      catch(sensor_msgs::CvBridgeException error)
      {
	ROS_ERROR("CvBridge: Error converting image");
	return;
      }

      IplImage *cv_image_disp = NULL;
      try
      {
	cv_image_disp = dbridge_.imgMsgToCv(disp,"mono16");
      }
      catch(sensor_msgs::CvBridgeException error)
      {
	ROS_ERROR("CvBridge: Error converting disparity");
	return;
      }

      // TODO: Finish this function
      
      // TODO: Preprocess the image for the detector

      cv::Mat img(cv_image_left,false);
      cv::Mat *small_img;
      std::vector<cv::Rect> found;
      if (!use_depth_) {

	// Timing
	struct timeval timeofday;
	gettimeofday(&timeofday,NULL);
	ros::Time startt = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);


	// Run the HOG detector. Similar to samples/peopledetect.cpp.
	hog_.detectMultiScale(img, found, hit_threshold_, cv::Size(8,8), cv::Size(24,16), 1.05, group_threshold_);
	small_img = &img;

	// Timing
	gettimeofday(&timeofday,NULL);
	ros::Time endt = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
	ros::Duration diff = endt-startt;
	ROS_DEBUG_STREAM_NAMED("pedestrian_detector","No depth, duration " << diff.toSec() );

      }
      else {

	ROS_ERROR("Height-based context for the pedestrian detector is not currently available.");
#if 0
	// Convert the stereo calibration into a camera model. Only done once.
	if (!cam_model_) {
	  double Fx = right_info->P[0];
	  double Fy = right_info->P[5];
	  double Clx = right_info->P[2];
	  double Crx = Clx;
	  double Cy = right_info->P[6];
	  double Tx = -right_info->P[3]/Fx;
	  cam_model_ = new CvStereoCamModel(Fx,Fy,Tx,Clx,Crx,Cy,1.0/16.0);
	}

	// Timing
	struct timeval timeofday;
	gettimeofday(&timeofday,NULL);
	ros::Time startt = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);

	////////////////////////////////
	// Get a 3d point for each pixel

	CvMat *uvd = cvCreateMat(img.cols*img.rows,3,CV_32FC1);
	CvMat *xyz = cvCreateMat(img.cols*img.rows,3,CV_32FC1);
	// Convert image uvd to world xyz.
	float *fptr = (float*)(uvd->data.ptr);
	ushort *cptr;
	for (int v =0; v < img.rows; ++v) {
	  cptr = (ushort*)(cv_image_disp->imageData+v*cv_image_disp->widthStep);
	  for (int u=0; u<img.cols; ++u) {
	    (*fptr) = (float)u; ++fptr;
	    (*fptr) = (float)v; ++fptr;
	    (*fptr) = (float)(*cptr); ++fptr; ++cptr;      
	  }
	}
	cam_model_->dispToCart(uvd,xyz);
	//CvMat *y = cvCreateMat(img.cols*img.rows,1,CV_32FC1);
	//CvMat *z = cvCreateMat(img.cols*img.rows,1,CV_32FC1);
	//cvSplit(xyz, NULL, y, z, NULL);
	cv::Mat xyzmat(xyz,false);
	//cv::Mat zmap(z,false);

	if (use_height_) {
	  // Remove the ceiling
	  int top_row;
	  removeCeiling(img, xyzmat, left->header.frame_id, left->header.stamp, max_height_m_, top_row);
	  small_img = new cv::Mat(img, cv::Rect(cv::Point(0, top_row), cv::Size(img.cols, img.rows-top_row)));
	}
	else {
	  small_img = &img;
	}

	hog_.detectMultiScale(*small_img, found, hit_threshold_, cv::Size(8,8), cv::Size(24,16), 1.05, group_threshold_);
	
	//	cvReleaseMat(&z);
	cvReleaseMat(&uvd);
	cvReleaseMat(&xyz);
	
	// Timing
	gettimeofday(&timeofday,NULL);
	ros::Time endt = ros::Time().fromNSec(1e9*timeofday.tv_sec + 1e3*timeofday.tv_usec);
	ros::Duration diff = endt-startt;
	ROS_DEBUG_STREAM_NAMED("pedestrian_detector","Remove ceiling, duration " << diff.toSec() );
#endif	
      }
      
      // TODO: Publish the found people.

      for( int i = 0; i < (int)found.size(); i++ )
      {
	cv::Rect r = found[i];
	cv::rectangle(*small_img, r.tl(), r.br(), cv::Scalar(0,255,0), 1);
      }
      ostringstream fname;
      fname << "/tmp/left_HOG_OpenCV_results2/" << setfill('0') << setw(6) << counter << "L.jpg";
      counter ++;
      cv::imwrite(fname.str(), *small_img);
      //cv::imshow("people detector", img);
      //cv::waitKey(3);

      // TODO: Release the sequence and related memory.
      
    }
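The gettimeofday bookkeeping used for the timing blocks above can be replaced with ROS wall-clock helpers; a minimal sketch:

// Sketch: ros::WallTime replaces gettimeofday
ros::WallTime startt = ros::WallTime::now();
hog_.detectMultiScale(img, found, hit_threshold_, cv::Size(8,8),
                      cv::Size(24,16), 1.05, group_threshold_);
ros::WallDuration diff = ros::WallTime::now() - startt;
ROS_DEBUG_STREAM_NAMED("pedestrian_detector", "No depth, duration " << diff.toSec());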
void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
{
	if(init){
		CvSize sz_ = cvSize (cam->x_res, cam->y_res);
    	capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
		init = false;	
	}

	//If we've already gotten the cam info, then go ahead
	if(cam->getCamInfo_){
		try{
			tf::StampedTransform CamToOutput;
    			try{
					tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0));
					tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput);
   				}
    			catch (tf::TransformException ex){
      				ROS_ERROR("%s",ex.what());
    			}

      		capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");
			marker_detector.Detect(capture_, cam, true, false, max_new_marker_error, max_track_error, CVSEQ, true);

			arPoseMarkers_.markers.clear ();
			for (size_t i=0; i<marker_detector.markers->size(); i++) 
			{
				//Get the pose relative to the camera
        		int id = (*(marker_detector.markers))[i].GetId(); 
				Pose p = (*(marker_detector.markers))[i].pose;
				double px = p.translation[0]/100.0;
				double py = p.translation[1]/100.0;
				double pz = p.translation[2]/100.0;
				double qx = p.quaternion[1];
				double qy = p.quaternion[2];
				double qz = p.quaternion[3];
				double qw = p.quaternion[0];

				tf::Quaternion rotation (qx,qy,qz,qw);
      			tf::Vector3 origin (px,py,pz);
      			tf::Transform t (rotation, origin);
				tf::Vector3 markerOrigin (0, 0, 0);
				tf::Transform m (tf::Quaternion::getIdentity (), markerOrigin);
				tf::Transform markerPose = t * m; // marker pose in the camera frame

				//Publish the transform from the camera to the marker		
				std::string markerFrame = "ar_marker_";
				std::stringstream out;
				out << id;
				std::string id_string = out.str();
				markerFrame += id_string;
				tf::StampedTransform camToMarker (t, image_msg->header.stamp, image_msg->header.frame_id, markerFrame.c_str());
    			tf_broadcaster->sendTransform(camToMarker);
				
				//Create the rviz visualization messages
				tf::poseTFToMsg (markerPose, rvizMarker_.pose);
				rvizMarker_.header.frame_id = image_msg->header.frame_id;
				rvizMarker_.header.stamp = image_msg->header.stamp;
				rvizMarker_.id = id;

				rvizMarker_.scale.x = 1.0 * marker_size/100.0;
				rvizMarker_.scale.y = 1.0 * marker_size/100.0;
				rvizMarker_.scale.z = 0.2 * marker_size/100.0;
				rvizMarker_.ns = "basic_shapes";
				rvizMarker_.type = visualization_msgs::Marker::CUBE;
				rvizMarker_.action = visualization_msgs::Marker::ADD;
				switch (id)
				{
				  case 0:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 1.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 1:
				    rvizMarker_.color.r = 1.0f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 2:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 1.0f;
				    rvizMarker_.color.b = 0.0f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 3:
				    rvizMarker_.color.r = 0.0f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				  case 4:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.5f;
				    rvizMarker_.color.b = 0.0;
				    rvizMarker_.color.a = 1.0;
				    break;
				  default:
				    rvizMarker_.color.r = 0.5f;
				    rvizMarker_.color.g = 0.0f;
				    rvizMarker_.color.b = 0.5f;
				    rvizMarker_.color.a = 1.0;
				    break;
				}
				rvizMarker_.lifetime = ros::Duration (1.0);
				rvizMarkerPub_.publish (rvizMarker_);

				//Get the pose of the tag in the camera frame, then the output frame (usually torso)				
				tf::Transform tagPoseOutput = CamToOutput * markerPose;

				//Create the pose marker messages
				ar_track_alvar::AlvarMarker ar_pose_marker;
				tf::poseTFToMsg (tagPoseOutput, ar_pose_marker.pose.pose);
      			ar_pose_marker.header.frame_id = output_frame;
			    ar_pose_marker.header.stamp = image_msg->header.stamp;
			    ar_pose_marker.id = id;
			    arPoseMarkers_.markers.push_back (ar_pose_marker);	
			}
			arMarkerPub_.publish (arPoseMarkers_);
		}
    	catch (sensor_msgs::CvBridgeException & e){
      		ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
    	}
	}
}
	void image_callback (const sensor_msgs::ImageConstPtr& received_image)
    {
      //If the image is not de-Bayered yet, do it (needed for the Prosilica image on the PR2 robot)
      if (received_image->encoding.find("bayer") != std::string::npos)
      {
        cv::Mat tmpImage;
        const std::string& raw_encoding = received_image->encoding;
        int raw_type = CV_8UC1;
        if (raw_encoding == sensor_msgs::image_encodings::BGR8 || raw_encoding == sensor_msgs::image_encodings::RGB8)
          raw_type = CV_8UC3;
        const cv::Mat raw(received_image->height, received_image->width, raw_type,
                  const_cast<uint8_t*>(&received_image->data[0]), received_image->step);
        // Convert to color BGR
        int code = 0;
        if (received_image->encoding == sensor_msgs::image_encodings::BAYER_RGGB8)
          code = CV_BayerBG2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_BGGR8)
          code = CV_BayerRG2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_GBRG8)
          code = CV_BayerGR2BGR;
        else if (received_image->encoding == sensor_msgs::image_encodings::BAYER_GRBG8)
          code = CV_BayerGB2BGR;
        else
        {
          ROS_ERROR("[image_proc] Unsupported encoding '%s'", received_image->encoding.c_str());
          return;
        }
        cv::cvtColor(raw, tmpImage, code);

        cv::Mat tmp2;
        cv::cvtColor(tmpImage, tmp2, CV_BGR2GRAY);
        camera_image = cvCreateImage(cvSize(800, 600), IPL_DEPTH_8U, 1);
        IplImage ti;
        ti = tmp2;
        cvSetImageROI(&ti, cvRect(300, 300, 1848, 1450));
        cvResize(&ti, camera_image);
      }
      else
      {
        camera_image = bridge.imgMsgToCv(received_image, "mono8");
      }
      printf("\n\n");
      ROS_INFO("Image received!");
      //call main processing function

      if (camera_image->width == 2)
      {
    	  //if (visualization_mode_ == SEQUENCES && sequence_buffer.size() == 0)
    		  //return;

    	  ROS_INFO("Special image for end of sequence detected! Switching to SEQUENCE mode!\n");
    	  visualization_mode_ = SEQUENCES;

    	  //std::sort(sequence_buffer.begin(), sequence_buffer.end());

    	  visualize(camera_image, NULL, NULL);

    	  clear_sequence_buffer();
    	  return;
      }

      std::string result = process_image(camera_image);

      //image_pub_.publish(bridge.cvToImgMsg(image_roi));
      image_pub_.publish(bridge.cvToImgMsg(camera_image));
      ROS_INFO("[ODUFinder:] image published to %s", output_image_topic_.c_str());
      if (image_roi != NULL)
    	  cvReleaseImage(&image_roi);

      //msg.data = name.substr(0, name.find_last_of('.')).c_str();
      //publish the result to http://www.ros.org/wiki/cop
      if (result.length() > 0)
      {
        vision_msgs::cop_answer msg;
        vision_msgs::cop_descriptor cop_descriptor;
        vision_msgs::aposteriori_position aposteriori_position;
        msg.found_poses.push_back(aposteriori_position);
        msg.found_poses[0].models.push_back(cop_descriptor);
        //TODO: only one needed probably, ask Moritz
        msg.found_poses[0].models[0].type = "ODUFinder";
        msg.found_poses[0].models[0].sem_class = result;
        msg.found_poses[0].models[0].object_id = ++object_id;
        msg.found_poses[0].position = 0;
        template_publisher.publish(msg);
      }
    }
Example 25
//Callback to handle getting video frames and processing them
void getCapCallback (const sensor_msgs::ImageConstPtr & image_msg)
{
	if(init){
		CvSize sz_ = cvSize (cam->x_res, cam->y_res);
    	capture_ = cvCreateImage (sz_, IPL_DEPTH_8U, 4);
		init = false;	
	}

	//If we've already gotten the cam info, then go ahead
	if(cam->getCamInfo_){
		try{
			//Get the transformation from the Camera to the output frame for this image capture
			tf::StampedTransform CamToOutput;
    		try{
				tf_listener->waitForTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, ros::Duration(1.0));
				tf_listener->lookupTransform(output_frame, image_msg->header.frame_id, image_msg->header.stamp, CamToOutput);
   			}
    		catch (tf::TransformException ex){
      			ROS_ERROR("%s",ex.what());
    		}

    		visualization_msgs::Marker rvizMarker;
    		ar_track_alvar::AlvarMarker ar_pose_marker;
    		arPoseMarkers_.markers.clear ();

    		//Convert the image
      		capture_ = bridge_.imgMsgToCv (image_msg, "rgb8");

      		//Get the estimated pose of the main markers by using all the markers in each bundle
    		GetMultiMarkerPoses(capture_);
		
    		//Draw the observed markers that are visible and note which bundles have at least 1 marker seen
            for(int i=0; i<n_bundles; i++)
            	bundles_seen[i] = false;

			for (size_t i=0; i<marker_detector.markers->size(); i++)
			{
        		int id = (*(marker_detector.markers))[i].GetId();

				// Draw if id is valid
        		if(id >= 0){

        			//Mark the bundle that marker belongs to as "seen"
					for(int j=0; j<n_bundles; j++){
						for(int k=0; k<bundle_indices[j].size(); k++){
							if(bundle_indices[j][k] == id){
								bundles_seen[j] = true;
								break;
							}
						}
					}

 					// Don't draw if it is a master tag...we do this later, a bit differently
					bool should_draw = true;
					for(int m=0; m<n_bundles; m++){ // separate index to avoid shadowing the outer loop's i
						if(id == master_id[m]) should_draw = false;
					}
					if(should_draw){
        				Pose p = (*(marker_detector.markers))[i].pose;
        				makeMarkerMsgs(VISIBLE_MARKER, id, p, image_msg, CamToOutput, &rvizMarker, &ar_pose_marker);
        				rvizMarkerPub_.publish (rvizMarker);
        			}
				}
			}
			
			//Draw the main markers, whether they are visible or not -- but only if at least 1 marker from their bundle is currently seen
			for(int i=0; i<n_bundles; i++)
			{
                if(bundles_seen[i] == true){
    				makeMarkerMsgs(MAIN_MARKER, master_id[i], bundlePoses[i], image_msg, CamToOutput, &rvizMarker, &ar_pose_marker);
    				rvizMarkerPub_.publish (rvizMarker);
    				arPoseMarkers_.markers.push_back (ar_pose_marker);
				}
			}

			//Publish the marker messages
			arMarkerPub_.publish (arPoseMarkers_);
		}
    	catch (sensor_msgs::CvBridgeException & e){
      		ROS_ERROR ("Could not convert from '%s' to 'rgb8'.", image_msg->encoding.c_str ());
    	}
	}
}
Example 26
void FeatureTracker::image_callback(const sensor_msgs::ImageConstPtr& msg, const sensor_msgs::CameraInfoConstPtr& info_msg) {
  //need pose data for each picture, need to publish a camera pose
  ros::Time acquisition_time = msg->header.stamp;
  geometry_msgs::PoseStamped basePose;
  geometry_msgs::PoseStamped mapPose;
  basePose.pose.orientation.w=1.0;
  ros::Duration timeout(3);
  basePose.header.frame_id="/base_link";
  mapPose.header.frame_id="/map";
  
  try {
    tf_listener_.waitForTransform("/camera_1_link", "/map", acquisition_time, timeout);
    tf_listener_.transformPose("/map", acquisition_time,basePose,"/camera_1_link",mapPose);
    printf("pose #%d %f %f %f\n",pic_number++,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  }
  catch (tf::TransformException& ex) {
    ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
    printf("[map_maker] TF exception:\n%s", ex.what());
    return;
  }
  cam_model.fromCameraInfo(info_msg);
  
  
  
  
  // printf("callback called\n");
  try
  {
    // if you want to work with color images, change from mono8 to bgr8
    if(image_rect==NULL){
      image_rect = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      last_image= cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
      pyrA=cvCreateImage(cvSize(last_image->width+8,last_image->height/3.0), IPL_DEPTH_32F, 1);
      pyrB=cvCloneImage(pyrA);
      //  printf("cloned image\n");
    }
    else{
      //save the last image
      cvCopy(image_rect,last_image);
      cvCopy(bridge.imgMsgToCv(msg, "mono8"),image_rect);
      // printf("copied image\n");
    }
    if(output_image==NULL){
      output_image =cvCloneImage(image_rect);
    }
    if(eigImage==NULL){
      eigImage =cvCloneImage(image_rect);
    }
    if(tempImage==NULL){
      tempImage =cvCloneImage(image_rect);
    }
  }
  catch (sensor_msgs::CvBridgeException& e)
  {
    ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
    return;
  }
  
  if(image_rect!=NULL) {
    cvCopy(image_rect,output_image);
    
    printf("got image\n");
    
    track_features(mapPose);
    
    //draw features on the image
    for(int i=0;i<last_feature_count;i++){
      CvPoint center=cvPoint((int)features[i].x,(int)features[i].y);
      cvCircle(output_image,center,10,cvScalar(150),2);
      
      char strbuf [10];
      
      int n=sprintf(strbuf,"%d",current_feature_id[ i] );
      std::string text=std::string(strbuf,n);
      
      CvFont font;
      
      cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX,1,1);
      
      cvPutText(output_image,text.c_str(),cvPoint(center.x,center.y+20),&font,cvScalar(255));
      
      
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
  //    printf("%f x  %f y  %f z\n",tempRay.x,tempRay.y,tempRay.z);
    }
    
  //  featureList[0].print();
    
    //determine error gradient
    
    int min_features=10;
    
  //  printf("ypr %f %f %f\n",yaw,pitch,roll);
    
    cv::Point3d error_sum=calc_error(min_features,0, 0, 0);
    printf("total error is : %f\n",error_sum.x);
    
    for(int i=0;i<featureList.size();i++){
      if(min_features<featureList[i].numFeatures()){
	printf("\n\n\nfeature %d\n",i);
	printf("mean: %f %f %f\n",featureList[i].currentMean.x, featureList[i].currentMean.y, featureList[i].currentMean.z);
	
      }
    }
    
    
//    double error_up= calc_error(min_features,yalpha, 0, 0);
  //  printf("total up yaw error is : %f\n",error_up);
//    double error_down= calc_error(min_features,-yalpha, 0, 0);
  //  printf("total down yaw error is : %f\n",error_down);
  /*  
     
    double yaw_change=0;
    if(error_up<error_sum && error_up<error_down){
      yaw_change=yalpha;
    }else if(error_down<error_sum && error_down<error_up){
      yaw_change=-yalpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      yalpha/=2;
    }
    
    error_up=   calc_error(min_features,0,palpha, 0);
   // printf("total up pitch error is : %f\n",error_up);
    error_down=   calc_error(min_features,0,-palpha, 0);
   // printf("total down pitch error is : %f\n",error_down);
    
    double pitch_change=0;
    if(error_up<error_sum && error_up<error_down){
      pitch_change=palpha;
    }else if(error_down<error_sum && error_down<error_up){
      pitch_change=-palpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      //palpha/=2;
    }
    
    error_up=  calc_error(min_features,0,0,ralpha);
   // printf("total up roll error is : %f\n",error_up);
    
    error_down=   calc_error(min_features,0,0,-ralpha);
   // printf("total down roll error is : %f\n",error_down);
    
    double roll_change=0;
    if(error_up<error_sum && error_up<error_down){
      roll_change=ralpha;
    }else if(error_down<error_sum && error_down<error_up){
      roll_change=-ralpha;
    }else if(error_down!=error_sum&&error_sum!=error_up){
      ralpha/=2;
    }
    
  //  yaw+=yaw_change;
  
  //  pitch+=pitch_change;
 
  
  //   roll+=roll_change;
    */
    
    try{
      sensor_msgs::Image output_image_cvim =*bridge.cvToImgMsg(output_image, "mono8");
      output_image_cvim.header.stamp=msg->header.stamp;
      analyzed_pub_.publish(output_image_cvim);
    }
    catch (sensor_msgs::CvBridgeException& e){
      ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
      return;
    }
    // printf("displaying image\n");
  }else{
    // printf("null image_rect\n");
  }
}
Example 27
bool is_thick(image_processor::ProcessBridge::Request    &req,
                 image_processor::ProcessBridge::Response &res )
{
    sensor_msgs::Image image = req.image;
    sensor_msgs::Image image2 = req.image2;
    sensor_msgs::ImagePtr img_ptr(new sensor_msgs::Image(image));
    
    sensor_msgs::CameraInfo cam_info = req.info;
    
    IplImage *cv_image;
    IplImage *cv_image2;
    try
    {
        cv_image = bridge_.imgMsgToCv(img_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[is_thick] CvBridge conversion of first image failed");
        return false;
    }

    CvPoint2D64f temp_pts[128];
    double temp_params[128];

    // Crop the first image
    cvSetImageROI(cv_image, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image = cvCloneImage(cv_image);
    cvSaveImage("/home/stephen/cropped_image1.png", cropped_image);

    sensor_msgs::ImagePtr img2_ptr(new sensor_msgs::Image(image2));
    try
    {
        cv_image2 = bridge_.imgMsgToCv(img2_ptr, "bgr8");
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[is_thick] CvBridge conversion of second image failed");
        return false;
    }

    // Crop the second image (note: the ROI must be set on cv_image2, not cv_image)
    cvSetImageROI(cv_image2, cvRect(CROP_X, CROP_Y, CROP_WIDTH, CROP_HEIGHT));
    IplImage* cropped_image2 = cvCloneImage(cv_image2);
    cvSaveImage("/home/stephen/cropped_image2.png", cropped_image2);

    cvReleaseImage(&cropped_image);
    cvReleaseImage(&cropped_image2);
    IplImage* new_cropped_image  = cvLoadImage("/home/stephen/cropped_image1.png");
    IplImage* new_cropped_image2 = cvLoadImage("/home/stephen/cropped_image2.png");

    IplImage *output_cv_image;
    int num_pts = 0;
    int num_params = 0;
    output_cv_image = is_thick_process(new_cropped_image, temp_pts, temp_params, num_pts, num_params, new_cropped_image2);

    for (int i = 0; i < num_pts; i++){
        res.pts_x.push_back(temp_pts[i].x);
        res.pts_y.push_back(temp_pts[i].y);
    }
    for (int i = 0; i < num_params; i++){
        res.params.push_back(temp_params[i]);
    }
    sensor_msgs::ImagePtr output_img_ptr;
    sensor_msgs::Image output_img;
    try
    {
        output_img_ptr = bridge_.cvToImgMsg(output_cv_image, "bgr8");
        output_img = *output_img_ptr;
    }
    catch (sensor_msgs::CvBridgeException error)
    {
        ROS_ERROR("[is_thick] CvBridge conversion of annotated image failed");
        return false;
    }
    res.image_annotated = output_img;

    return true;
}