//callback for operations that require a depth image
void depthCallback(const sensor_msgs::ImageConstPtr &msg)
{
  //copy the ROS image into an OpenCV matrix via cv_bridge
  bridge_image_depth = cv_bridge::toCvCopy(msg, msg->encoding);
  cvImage_depth = bridge_image_depth->image;

  //threshold the depth image, reuse the cv_bridge wrapper, and publish
  getDepthThresholdImage(cvImage_depth, depthThresholdOut);
  bridge_image_depth->image = depthThresholdOut;
  depthThresholdPub.publish(bridge_image_depth->toImageMsg());

  //publish a frame-difference image once a previous frame exists
  if (lastImage.rows > 0) {
    getImageDifference(cvImage_depth, lastImage);
    bridge_image_depth->image = lastImage;
    motionDetectPub.publish(bridge_image_depth->toImageMsg());
  } else {
    ROS_DEBUG("lastImage hasn't been created yet");
  }

  //the shallow copy is safe here: toCvCopy allocates fresh data every
  //callback, so lastImage keeps pointing at this frame's pixels
  lastImage = cvImage_depth;
}
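The getDepthThresholdImage and getImageDifference helpers are defined elsewhere in this node. As a minimal sketch (an assumption, not the author's code), getImageDifference could overwrite its second argument with a per-pixel absolute difference:

void getImageDifference(const cv::Mat &current, cv::Mat &last)
{
  //replace the previous frame with |current - last| so the caller can
  //publish the result directly as a crude motion image
  cv::absdiff(current, last, last);
}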
Example 2
    bool getRectFromTopic(
      recognition::RectFromTopic::Request &request,
      recognition::RectFromTopic::Response &response
    )
    {
      if (!lastImage)
        return false;

      cv_bridge::CvImage cvImg, cvMask;
      cvImg.header.stamp = ros::Time::now();
      cvImg.encoding = sensor_msgs::image_encodings::TYPE_8UC3;

      // Crop the latest image to the requested rectangle; safeRect clamps
      // it to the image bounds.
      cvImg.image = lastImage->image(
        safeRect(
          lastImage->image,
          request.x, 
          request.y, 
          request.width, 
          request.height
        )
      );
      response.image = *cvImg.toImageMsg();

      // If a depth frame is available, crop it the same way and derive a
      // single-channel mask from it.
      cv::Mat mask;
      if (lastDepth)
      {
        cv::Mat newDepth = lastDepth->image(
          safeRect(
            lastDepth->image,
            request.x, 
            request.y, 
            request.width, 
            request.height
          )
        );
        _getMaskFromDepth(newDepth, mask);
        cvMask.header.stamp = ros::Time::now();
        cvMask.encoding = sensor_msgs::image_encodings::TYPE_8UC1;
        cvMask.image = mask;
        response.mask = *cvMask.toImageMsg();
      }
      return true;
    }
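safeRect is used throughout these examples but not shown. A plausible sketch (an assumption about its contract): it clamps the requested rectangle to the image bounds.

    cv::Rect safeRect(const cv::Mat &image, int x, int y, int width, int height)
    {
      // Intersect the requested rectangle with the full image rectangle so
      // the cv::Mat ROI taken from it can never read out of bounds.
      return cv::Rect(x, y, width, height)
           & cv::Rect(0, 0, image.cols, image.rows);
    }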
 void equalizeHistogram( cv_bridge::CvImagePtr cvPtr )
 {
     // Equalize each channel independently, then recombine and publish.
     std::vector< cv::Mat > splitImages;
     cv::split( cvPtr->image, splitImages );
     for( size_t i = 0; i < splitImages.size(); i++ )
     {
         cv::equalizeHist( splitImages[i], splitImages[i] );
     }
     cv::merge( splitImages, cvPtr->image );
     histPub_.publish( cvPtr->toImageMsg() );
 }
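Equalizing the B, G, and R channels independently can shift colors. A common variant, sketched here as a hypothetical alternative rather than what this node does, equalizes only the luminance channel:

 void equalizeLuminance( cv_bridge::CvImagePtr cvPtr )
 {
     // Convert to YCrCb, equalize only Y, and convert back, which
     // preserves the original chroma.
     cv::Mat ycrcb;
     cv::cvtColor( cvPtr->image, ycrcb, CV_BGR2YCrCb );
     std::vector< cv::Mat > channels;
     cv::split( ycrcb, channels );
     cv::equalizeHist( channels[0], channels[0] );
     cv::merge( channels, ycrcb );
     cv::cvtColor( ycrcb, cvPtr->image, CV_YCrCb2BGR );
 }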
 void imageCallback(const sensor_msgs::ImageConstPtr& image_msg)
 {
   try
   {
     input_bridge_ = cv_bridge::toCvCopy(image_msg, "mono8");
     output_bridge_ = cv_bridge::toCvCopy(image_msg, "bgr8");
   }
   catch (cv_bridge::Exception& ex)
   {
     ROS_ERROR("[calibrate] Failed to convert image");
     return;
   }
 
   Eigen::Vector3f translation;
   Eigen::Quaternionf orientation;
   
   if (!pattern_detector_.detectPattern(input_bridge_->image, translation, orientation, output_bridge_->image))
   {
     ROS_INFO("[calibrate] Couldn't detect checkerboard, make sure it's visible in the image.");
     return;
   }
   
   tf::Transform target_transform;
   target_transform.setOrigin( tf::Vector3(translation.x(), translation.y(), translation.z()) );
   target_transform.setRotation( tf::Quaternion(orientation.x(), orientation.y(), orientation.z(), orientation.w()) );

   // Broadcast the detected checkerboard pose, stamped with the image time.
   tf_broadcaster_.sendTransform(tf::StampedTransform(target_transform, image_msg->header.stamp, image_msg->header.frame_id, target_frame));
   publishCloud(ideal_points_, target_transform, image_msg->header.frame_id);
   
   overlayPoints(ideal_points_, target_transform, output_bridge_);
   
   // Publish calibration image
   pub_.publish(output_bridge_->toImageMsg());
   
   pcl_ros::transformPointCloud(ideal_points_, image_points_, target_transform);

   // One-shot node: run the calibration once, then shut down.
   calibrate(image_msg->header.frame_id);

   ros::shutdown();
 }
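publishCloud and overlayPoints belong to the surrounding calibration node. A minimal sketch of publishCloud, assuming it moves the ideal checkerboard corners into the camera frame and publishes them on a hypothetical cloud_pub_ for inspection in RViz:

 void publishCloud(const pcl::PointCloud<pcl::PointXYZ>& ideal_points,
                   const tf::Transform& transform, const std::string& frame_id)
 {
   // Transform the model points with the detected pose and tag them with
   // the camera frame so they overlay the live image in RViz.
   pcl::PointCloud<pcl::PointXYZ> cloud;
   pcl_ros::transformPointCloud(ideal_points, cloud, transform);
   cloud.header.frame_id = frame_id;
   cloud_pub_.publish(cloud);
 }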
void ImageFeed::publishImage()
{
  // Build the next file name from the current grid coordinates.
  std::stringstream sstm;
  sstm << path_ << x << "_" << y << ".ppm";
  std::string next_image = sstm.str();

  msg_ptr_->image = cv::imread(next_image);
  if (msg_ptr_->image.empty())
  {
    ROS_WARN("Could not read image: %s", next_image.c_str());
    return;
  }

  cv::imshow(WINDOW, msg_ptr_->image);
  cv::waitKey(30);
  ROS_INFO("path: %s", next_image.c_str());
  pub_.publish(msg_ptr_->toImageMsg());
}
Example 6
    bool insertTopic(
      recognition::InsertTopic::Request &request,
      recognition::InsertTopic::Response &response
    )
    {
      if (!lastImage)
        return false;

      // Crop the latest image to the requested rectangle, clamped by safeRect.
      cv::Mat newImage = lastImage->image(
        safeRect(
          lastImage->image,
          request.x, 
          request.y, 
          request.width, 
          request.height
        )
      );

      // Derive a mask from the matching depth crop, if depth is available.
      cv::Mat mask;
      if (lastDepth)
      {
        cv::Mat newDepth = lastDepth->image(
          safeRect(
            lastDepth->image,
            request.x, 
            request.y, 
            request.width, 
            request.height
          )
        );
        _getMaskFromDepth(newDepth, mask);
      }

      // Store the cropped image (and optional mask) under the requested label.
      algorithm->insertIntoDb(newImage, request.label, mask);

      response.success = true;

      return true;
    }
Example 7
    bool recognizeFromTopic(
      recognition::RecognizeFromTopic::Request &request,
      recognition::RecognizeFromTopic::Response &response
    )
    {
      if (!lastImage)
        return false;

      cv::Mat newImg = lastImage->image(
        safeRect(
          lastImage->image,
          request.x, 
          request.y, 
          request.width, 
          request.height
        )
      );

      cv::Mat mask;
      if (lastDepth)
      {
        cv::Mat newDepth = lastDepth->image(
          safeRect(
            lastDepth->image,
            request.x, 
            request.y, 
            request.width, 
            request.height
          )
        );
        _getMaskFromDepth(newDepth, mask);
      }

      // Ask the recognition backend for the best-matching label.
      response.label = algorithm->recognize(newImg, mask);
      return true;
    }
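_getMaskFromDepth is shared by Examples 2, 6, and 7 but not shown. A minimal sketch, assuming it keeps pixels whose depth falls inside a configured range (minDepth_ and maxDepth_ are hypothetical members):

    void _getMaskFromDepth(const cv::Mat &depth, cv::Mat &mask)
    {
      // cv::inRange writes 255 inside [minDepth_, maxDepth_] and 0 outside,
      // producing the 8UC1 mask the callers publish and match against.
      cv::inRange(depth, cv::Scalar(minDepth_), cv::Scalar(maxDepth_), mask);
    }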
    void applyMorphology( cv_bridge::CvImagePtr cvPtr )
    {
        // Rectangular kernel of size morphSize_, anchored at (1, 1),
        // the center of a 3x3 kernel.
        cv::Mat element = cv::getStructuringElement( cv::MORPH_RECT, morphSize_, cv::Point( 1, 1 ) );

        // Close to fill small holes, then open to remove small specks.
        cv::morphologyEx(
            cvPtr->image, cvPtr->image,
            cv::MORPH_CLOSE, element );

        cv::morphologyEx(
            cvPtr->image, cvPtr->image,
            cv::MORPH_OPEN, element );

        morphPub_.publish( cvPtr->toImageMsg() );
    }
    double getSingleSurfProb( const std::vector< cv::Point > &contour, cv_bridge::CvImagePtr cvPtr, int contourNum = 0 )
    {
        // Get the ROI for this contour
        cv::Rect boundingRect = cv::boundingRect( contour );
        cv::Mat imageROI = cvPtr->image( boundingRect );

        // Detect SURF keypoints in the ROI and compute their descriptors.
        std::vector< cv::KeyPoint > keypoints;
        surf_.detect( imageROI, keypoints );

        cv::Mat descriptors;
        surfDesc_.compute( imageROI, keypoints, descriptors );

        // Match against the stored reference descriptors.
        std::vector< cv::DMatch > matches;
        matcher_.match( descriptors, descriptors_, matches );

        if( matches.size() < 25 )
            return 0.0;

        // Keep only the 25 best matches (cv::DMatch orders by distance).
        std::nth_element( matches.begin(), matches.begin() + 24, matches.end() );
        matches.erase( matches.begin() + 25, matches.end() );

        // Average the distance of the 25 best matches.
        double sum = 0;
        for( size_t i = 0; i < matches.size(); i++ )
            sum += matches[i].distance;
        sum /= matches.size();

        // Optionally draw the matches. roundelKeypoints_, the keypoints of
        // the reference image, is assumed to be stored next to descriptors_.
#ifdef NOIMSHOW
        cv::Mat drawing = cv::Mat::zeros( cvPtr->image.size(), CV_8UC3 );
        cv::drawMatches( imageROI, keypoints,
                         roundelImage_, roundelKeypoints_,
                         matches,
                         drawing,
                         cv::Scalar( 255, 255, 255 ) );

        char windowName[255];
        snprintf( windowName, sizeof( windowName ), "Surf Matches %d", contourNum );
        cv::namedWindow( windowName );
        cv::imshow( windowName, drawing );
#endif

        // The probability is how close the mean match distance is to 0.
        return( fmax( 1.0 - sum, 0.0 ) );
    }
    void thresholdImage( cv_bridge::CvImagePtr cvPtr, bool byColor )
    {
        // Blur first to suppress noise.
        cv::GaussianBlur( cvPtr->image, cvPtr->image, cv::Size( 5, 5 ), 0 );

        if( byColor )
        {
            // Threshold around the target hue with loose saturation and
            // value bounds. Note that 8-bit hue in OpenCV spans [0, 179].
            cv::cvtColor( cvPtr->image, cvPtr->image, CV_BGR2HSV );

            cv::Scalar lowerBound( std::max( hValue_ - hRange_/2, 0 ), 50, 50 );
            cv::Scalar upperBound( std::min( hValue_ + hRange_/2, 179 ), 255, 255 );

            cv::inRange( cvPtr->image, lowerBound, upperBound, cvPtr->image );
        }
        else
        {
            cv::cvtColor( cvPtr->image, cvPtr->image, CV_BGR2GRAY );

            // Find the brightest pixel and keep only pixels within 1 of it.
            cv::Point min_loc;
            cv::Point max_loc;
            double min_val;
            double max_val;

            cv::minMaxLoc( cvPtr->image,
                           &min_val,
                           &max_val,
                           &min_loc,
                           &max_loc );

            cv::threshold(
                cvPtr->image,
                cvPtr->image,
                max_val - 1,
                255,
                CV_THRESH_BINARY );
        }

        cvPtr->encoding = enc::MONO8;
        thresholdPub_.publish(cvPtr->toImageMsg());
    }
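One caveat with the color branch: reds sit near hue 0 and wrap around 179, which a simple clamp cannot capture. A wraparound-aware variant (a hypothetical helper, not part of the original node) could combine two inRange calls:

    void hueThreshold( const cv::Mat &hsv, cv::Mat &mask )
    {
        int lo = hValue_ - hRange_ / 2;
        int hi = hValue_ + hRange_ / 2;
        if( lo < 0 )
        {
            // The window crosses hue 0: take [0, hi] plus [lo + 180, 179].
            cv::Mat low, high;
            cv::inRange( hsv, cv::Scalar( 0, 50, 50 ), cv::Scalar( hi, 255, 255 ), low );
            cv::inRange( hsv, cv::Scalar( lo + 180, 50, 50 ), cv::Scalar( 179, 255, 255 ), high );
            mask = low | high;
        }
        else
        {
            cv::inRange( hsv, cv::Scalar( lo, 50, 50 ), cv::Scalar( std::min( hi, 179 ), 255, 255 ), mask );
        }
    }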
Example 11
int main(int argc, char **argv)
{
  ros::init(argc, argv, "image_listener");
  ros::NodeHandle nh;

  cv::namedWindow("view");
  cv::startWindowThread();

  image_transport::ImageTransport it(nh);
  image_transport::Subscriber sub_j1 = it.subscribe("videoJ1", 1, imageCallbackJ1);
  image_transport::Subscriber sub_j2 = it.subscribe("videoJ2", 1, imageCallbackJ2);

  // ros::Rate uses ros::Time, so construct it after ros::init().
  ros::Rate loop_rate(1);

  while (ros::ok())
  {
    // Show the latest frame from the first camera once one has arrived.
    if (captured)
    {
      cv::imshow("view", cv_bridge::toCvShare(cam1, "bgr8")->image);
    }

    ptrJ1.reset();

    ros::spinOnce();
    loop_rate.sleep();
  }

  return 0;
}
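The callbacks and the captured, cam1, and ptrJ1 globals are defined elsewhere. A plausible sketch of imageCallbackJ1, assuming it just stores the newest message and raises the flag that main() checks:

void imageCallbackJ1(const sensor_msgs::ImageConstPtr &msg)
{
  // Keep a shared reference to the latest frame; main() displays it.
  cam1 = msg;
  captured = true;
}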
    void imageCb(const sensor_msgs::ImageConstPtr& msg)
    {
        // cv_ptr is a class member of type cv_bridge::CvImagePtr.
        try
        {
            cv_ptr = cv_bridge::toCvCopy(msg, enc::BGR8);
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }

        geometry_msgs::PoseStamped tmppose;

        CvPoint objectNextPos;
        int nbPixels;

        // Look for the CD: convert to grayscale and smooth it, otherwise a
        // lot of false circles may be detected, then run a Hough transform.
        Mat gray;
        cvtColor(cv_ptr->image, gray, CV_BGR2GRAY);
        GaussianBlur( gray, gray, Size(9, 9), 2, 2 );
        vector<Vec3f> circles;
        HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 2, 200, 200, 100, 50 );
        type_obj.data = 0;
        for( size_t i = 0; i < circles.size(); i++ )
        {
                Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
                int radius = cvRound(circles[i][2]);
                // draw the circle center and outline
                circle( cv_ptr->image, center, 3, Scalar(0,255,0), -1, 8, 0 );
                circle( cv_ptr->image, center, radius, Scalar(0,0,255), 3, 8, 0 );

                // record the CD position; if several circles are found,
                // the last one wins
                x_CD.data = cvRound(circles[i][0]);
                y_CD.data = cvRound(circles[i][1]);
                type_obj.data = 1;
        }

        // Look for the BAR by color thresholding; binarisation() draws the
        // detection and returns the centroid of the matching pixels.
        objectNextPos = binarisation(&cv_ptr->image.operator IplImage(), &nbPixels, h_BAR, s_BAR, v_BAR, tolerance_BAR, Scalar(0,0,255));
        x_BAR.data = objectNextPos.x;
        y_BAR.data = objectNextPos.y;
        if(x_BAR.data >= 1)
            type_obj.data += 2;


        if(mode == TAKE_AUTONOMOUSLY) {

            if(type_obj.data > 0) {

                switch(type_obj.data) {
                    case 1 : // only the CD is seen
                        if(nb_img_cnt > MAX_CNT_OBJECT) { // object confirmed over several frames
                            // Empirical pixel-to-workspace mapping (meters).
                            tmppose.pose.position.x = ((288)*90.0/144.0 -((double)y_CD.data)*90.0/144.0 + (50))/1000.0 - 0.02;
                            tmppose.pose.position.y = (-((double)x_CD.data)*250.0/352.0+(167.0*278.0/352.0))/1000.0 + 0.015;
                            tmppose.pose.position.z = 0.072;
                            object_pub.publish(tmppose);
                            ROS_ERROR("Taking CD : %f : %f", tmppose.pose.position.x, tmppose.pose.position.y);
                            nb_img_cnt = 0;
                            usleep(2000000); // give the arm two seconds to act
                        }
                        else {
                            nb_img_cnt++;
                        }
                        break;
                    case 2 : // only the BAR is seen
                        if(nb_img_cnt > MAX_CNT_OBJECT) { // object confirmed over several frames
                            // Empirical pixel-to-workspace mapping (meters).
                            tmppose.pose.position.x = ((480)*90.0/240.0 -((double)y_BAR.data)*90.0/240.0 + (50))/1000.0;
                            tmppose.pose.position.y = (-((double)x_BAR.data)*278.0/640.0+(335.0*278.0/640.0))/1000.0;
                            tmppose.pose.position.z = 0.062;
                            object_pub.publish(tmppose);
                            ROS_ERROR("Taking BAR : %f : %f", tmppose.pose.position.x, tmppose.pose.position.y);
                            nb_img_cnt = 0;
                        }
                        else {
                            nb_img_cnt++;
                        }
                        break;
                    case 3 : // both objects are seen: take the lower one in the image
                        if(nb_img_cnt > MAX_CNT_OBJECT) { // object confirmed over several frames
                            if(y_CD.data > y_BAR.data) {
                                ROS_ERROR("Taking CD");
                            }
                            else {
                                ROS_ERROR("Taking BAR");
                            }
                            nb_img_cnt = 0;
                        }
                        else {
                            nb_img_cnt++;
                        }
                        break;
                }
            }
            else {
                // Nothing seen this frame: decay the confirmation counter.
                nb_img_cnt--;
                if(nb_img_cnt < 0)
                    nb_img_cnt = 0;
            }
        }
        else {
            nb_img_cnt = 0;
        }
        /*
           Canny( gray, gray, 16, 33, 3 );
           vector<Vec4i> lines;
           HoughLinesP( gray, lines, 1, CV_PI/180, 10.0, 240.0, 15.0 );
         */

        /*
        for( size_t i = 0; i < lines.size(); i++ )
        {
            if( ((lines.size() - i) > 1)
                && (sqrt((lines[i][0]-lines[i+1][0])*(lines[i][0]-lines[i+1][0]) + (lines[i][1]-lines[i+1][1])*(lines[i][1]-lines[i+1][1])) > 125 )
                && (sqrt((lines[i][0]-lines[i+1][0])*(lines[i][0]-lines[i+1][0]) + (lines[i][1]-lines[i+1][1])*(lines[i][1]-lines[i+1][1])) < 170 ) )
            {
                line( cv_ptr->image, Point(lines[i][0], lines[i][1]),
                      Point(lines[i+1][2], lines[i+1][3]), Scalar(255,255,0), 3, 8 );
                line( cv_ptr->image, Point(lines[i+1][0], lines[i+1][1]),
                      Point(lines[i][2], lines[i][3]), Scalar(255,255,0), 3, 8 );

                // Test of line intersection: segment equations...
                int a, b, c, d, x, y;
                a = (lines[i][0] - lines[i+1][2]) / (lines[i][1] - lines[i+1][3]);
                b = lines[i][1] - a * lines[i][0];
                c = (lines[i+1][0] - lines[i][2]) / (lines[i+1][1] - lines[i][3]);
                d = lines[i+1][1] - c * lines[i+1][0];

                // ...and their intersection point.
                x = (d - b) / (a - c);
                y = a * x + b;

                ROS_ERROR("intersection : %i | %i ", x, y);

                // draw the intersection point
                Point center2(cvRound(x), cvRound(y));
                circle( cv_ptr->image, center2, 3, Scalar(0,255,0), -1, 8, 0 );

                i++;
                break;
            }
            else {
                line( cv_ptr->image, Point(lines[i][0], lines[i][1]),
                      Point(lines[i][2], lines[i][3]), Scalar(255,0,0), 3, 8 );
            }
        }
        */

        /*    Mat gray = Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_8UC3);
              Mat out;
              cvtColor(cv_ptr->image, gray, CV_BGR2GRAY);

        // smooth it, otherwise a lot of false circles may be detected
        GaussianBlur( gray, gray, Size(9, 9), 7, 9 );

        Canny( gray, out, 16, 33, 3 );
        Mat gray2;
        cvtColor(cv_ptr->image, gray2, CV_BGR2GRAY);
        //Canny( gray2, out, 16, 33, 3 );
         */
        /*
           vector<Vec3f> circles;
           HoughCircles(out, circles, CV_HOUGH_GRADIENT, 3, 506, 90, 56, 50 );

           for( size_t i = 0; i < circles.size(); i++ )
           {
           Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
           int radius = cvRound(circles[i][2]);
        // draw the circle center
        circle( cv_ptr->image, center, 3, Scalar(0,255,0), -1, 8, 0 );
        // draw the circle outline
        circle( cv_ptr->image, center, radius, Scalar(0,0,255), 3, 8, 0 );
        }
         */
        /*
           vector<Vec2f> lines;
           HoughLines( out, lines, 1, CV_PI/180, 100 );

           for( size_t i = 0; i < lines.size(); i++ )
           {
           float rho = lines[i][0];
           float theta = lines[i][1];
           double a = cos(theta), b = sin(theta);
           double x0 = a*rho, y0 = b*rho;
           Point pt1(cvRound(x0 + 1000*(-b)),
           cvRound(y0 + 1000*(a)));
           Point pt2(cvRound(x0 - 1000*(-b)),
           cvRound(y0 - 1000*(a)));
           line( cv_ptr->image, pt1, pt2, Scalar(0,0,255), 3, 8 );
           }
         */
        /*
           vector<Vec4i> lines;
           HoughLinesP( out, lines, 1, CV_PI/180, 10.0, 140.0, 15.0 );
           for( size_t i = 0; i < lines.size(); i++ )
           {
           line( cv_ptr->image, Point(lines[i][0], lines[i][1]),
           Point(lines[i][2], lines[i][3]), Scalar(255,0,0), 3, 8 );
           }
         */


        /*    cv::namedWindow( "Source", 1 );
              cv::imshow( "Source", cv_ptr->image );

              cv::Mat dst = cv::Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_8UC3);

              cv::vector<cv::vector<cv::Point> > contours;
              cv::vector<cv::Vec4i> hierarchy;

              cv::findContours( cv_ptr->image, contours, hierarchy,
              CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );

        // iterate through all the top-level contours,
        // draw each connected component with its own random color
        int idx = 0;
        for( ; idx >= 0; idx = hierarchy[idx][0] )
        {
        cv::Scalar color( rand()&255, rand()&255, rand()&255 );
        cv::drawContours( dst, contours, idx, color, CV_FILLED, 8, hierarchy );
        }

        //    if (cv_ptr->image.rows > 60 && cv_ptr->image.cols > 60)
        //      cv::circle(cv_ptr->image, cv::Point(50, 50), 10, CV_RGB(255,0,0));

        cv::imshow(WINDOW, cv_ptr->image);
        //    cv::imshow(WINDOW, dst);
         */


        // Publish the annotated image.
        image_pub_.publish(cv_ptr->toImageMsg());
        waitKey(3);
    }
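binarisation() is defined elsewhere in this node. A rough sketch of its contract (an assumption, written against the C++ API rather than whatever the original does): it thresholds around an HSV target color, reports the pixel count, draws the detection, and returns the centroid.

    CvPoint binarisation(IplImage *frame, int *nbPixels,
                         int h, int s, int v, int tolerance, Scalar drawColor)
    {
        // Wrap the IplImage without copying, then threshold around the
        // target HSV color with the given tolerance.
        Mat image = cv::cvarrToMat(frame);
        Mat hsv, mask;
        cvtColor(image, hsv, CV_BGR2HSV);
        inRange(hsv, Scalar(h - tolerance, s - tolerance, v - tolerance),
                Scalar(h + tolerance, s + tolerance, v + tolerance), mask);
        *nbPixels = countNonZero(mask);

        // Return the centroid of the matching pixels (0, 0 if none).
        CvPoint center = cvPoint(0, 0);
        if (*nbPixels > 0)
        {
            Moments m = moments(mask, true);
            center = cvPoint(cvRound(m.m10 / m.m00), cvRound(m.m01 / m.m00));
            circle(image, center, 5, drawColor, -1);
        }
        return center;
    }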
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (!image_0_)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
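The match() and homography() helpers are not shown. A minimal sketch of homography(), assuming it collects the matched point pairs and estimates H with RANSAC via cv::findHomography:

    void homography(const std::vector<cv::KeyPoint> &keypoints_0,
                    const std::vector<cv::KeyPoint> &keypoints_1,
                    const std::vector<cv::DMatch> &matches, cv::Mat &H)
    {
        // findHomography needs at least four point pairs; leave H empty
        // otherwise, which the caller above treats as "no homography".
        if (matches.size() < 4)
            return;

        std::vector<cv::Point2f> points_0, points_1;
        for (size_t i = 0; i < matches.size(); ++i)
        {
            points_0.push_back(keypoints_0[matches[i].queryIdx].pt);
            points_1.push_back(keypoints_1[matches[i].trainIdx].pt);
        }
        H = cv::findHomography(points_0, points_1, CV_RANSAC, 3.0);
    }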