Example No. 1
  void processImage(cv::Mat& image, cv::Mat& image_gray) {
    // alternative way is to grab, then retrieve; allows for
    // multiple grab when processing below frame rate - v4l keeps a
    // number of frames buffered, which can lead to significant lag
    //      m_cap.grab();
    //      m_cap.retrieve(image);

    // detect April tags (requires a gray scale image)
    cv::cvtColor(image, image_gray, cv::COLOR_BGR2GRAY);
    double t0;
    if (m_timing) {
      t0 = tic();
    }
    vector<AprilTags::TagDetection> detections = m_tagDetector->extractTags(image_gray);
    if (m_timing) {
      double dt = tic()-t0;
      cout << "Extracting tags took " << dt << " seconds." << endl;
    }

    // print out each detection
    cout << detections.size() << " tags detected:" << endl;
    for (int i=0; i<detections.size(); i++) {
      print_detection(detections[i]);
    }

    // show the current image including any detections
    if (m_draw) {
      for (int i=0; i<detections.size(); i++) {
        // also highlight in the image
        detections[i].draw(image);
      }
      imshow(windowName, image); // OpenCV call
    }
  }
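The "grab, then retrieve" alternative mentioned in the comment at the top of this example is OpenCV's two-step capture API. Below is a minimal sketch of that pattern, assuming a cv::VideoCapture opened elsewhere; the helper name grabLatestFrame and the buffer depth of 4 are illustrative assumptions, not taken from the example.

  #include <opencv2/opencv.hpp>

  // grab() only advances the driver's frame queue (no decode); retrieve()
  // decodes the last grabbed frame. Grabbing a few times before retrieving
  // discards stale V4L-buffered frames when processing runs slower than the
  // camera frame rate.
  void grabLatestFrame(cv::VideoCapture& cap, cv::Mat& image, int stale_frames = 4) {
    for (int i = 0; i < stale_frames; i++) {
      cap.grab();          // cheap: skip past one buffered (stale) frame
    }
    cap.retrieve(image);   // decode only the most recent grabbed frame
  }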
Example No. 2
  void cbImage(const sensor_msgs::ImageConstPtr& image_msg){
    if (not has_camera_info_){
      ROS_INFO_STREAM("[StreetNameDetectorNode] Still waiting for CameraInfo.");
      return;
    }
    /* Convert the ROS image to cv::Mat (copy it, since detections are drawn on it below) */
    cv_bridge::CvImagePtr rgb_ptr = cv_bridge::toCvCopy(image_msg, "bgr8");
    /* Convert to a gray image for tag extraction */
    cv::Mat image = rgb_ptr->image;
    cv::Mat image_gray;
    cv::cvtColor(image, image_gray, cv::COLOR_BGR2GRAY);

    /* Detect april tag */
    vector<AprilTags::TagDetection> detections = tag_detector_.extractTags(image_gray);
 
    /* Detect street name */
    //    cv::Mat im_in = image.clone();
    //    // pre-processing: keep green channel only for blobs
    //    cv::Mat im_channels[3];
    //    cv::split(image, im_channels);
    //    // saturate the blue and red channels (set to 255) so only green carries information
    //    im_channels[0] = cv::Mat::ones(image.rows, image.cols, CV_8UC1);
    //    im_channels[2] = cv::Mat::ones(image.rows, image.cols, CV_8UC1);
    //    im_channels[0] = 255 * im_channels[0];
    //    im_channels[2] = 255 * im_channels[2];
    //    //Merging green channel back to image
    //    cv::merge(im_channels, 3, image);

    vector<Region> regions_all, regions_black, regions_white,
        regions_black_f, regions_white_f, regions_filtered;
    this->detect_regions(image, regions_black, regions_white);

    ROS_INFO_STREAM("Regions Black:\t" << regions_black.size());
    ROS_INFO_STREAM("Regions White:\t" << regions_white.size());

    // extract keypoints: detect multi-scale FAST corners
    std::vector<cv::KeyPoint> kps1;
    kps1 = this->get_fast_corners(image, 3000);

    // draw the detected regions and keypoints on the image
    this->draw_regions(image, regions_black);
    cv::drawKeypoints(image, kps1, image);

    /* publish visualization */
    duckietown_msgs::AprilTags tags_msg;
    for (int i = 0; i < detections.size(); ++i){
      tags_msg.detections.push_back(toMsg(detections[i]));
      detections[i].draw(image);
    }
    tags_msg.header.stamp = image_msg->header.stamp;
    /* TODO: should this use the frame id of the image or of the vehicle? */
    tags_msg.header.frame_id = image_msg->header.frame_id;

    pub_image_.publish(rgb_ptr->toImageMsg());

    pub_detection_.publish(tags_msg);
  }
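For context, cbImage is a ROS subscriber callback. The sketch below shows one plausible way such a node could be wired up; the topic names, queue sizes, and the cbCameraInfo callback are assumptions for illustration (only the class name, the publishers, and has_camera_info_ appear in the example above), and members such as tag_detector_ are omitted.

  #include <ros/ros.h>
  #include <sensor_msgs/Image.h>
  #include <sensor_msgs/CameraInfo.h>
  #include <duckietown_msgs/AprilTags.h>

  class StreetNameDetectorNode {
  public:
    explicit StreetNameDetectorNode(ros::NodeHandle& nh) : has_camera_info_(false) {
      sub_image_     = nh.subscribe("image_raw", 1, &StreetNameDetectorNode::cbImage, this);
      sub_cam_info_  = nh.subscribe("camera_info", 1, &StreetNameDetectorNode::cbCameraInfo, this);
      pub_detection_ = nh.advertise<duckietown_msgs::AprilTags>("apriltags", 1);
      pub_image_     = nh.advertise<sensor_msgs::Image>("image_with_detections", 1);
    }

  private:
    void cbCameraInfo(const sensor_msgs::CameraInfoConstPtr& msg) {
      camera_info_ = *msg;       // cache the intrinsics
      has_camera_info_ = true;   // cbImage returns early until this is set
    }
    void cbImage(const sensor_msgs::ImageConstPtr& image_msg);  // body as in the example above

    ros::Subscriber sub_image_, sub_cam_info_;
    ros::Publisher pub_detection_, pub_image_;
    sensor_msgs::CameraInfo camera_info_;
    bool has_camera_info_;
  };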
Example No. 3
  void processImage(cv::Mat& image, cv::Mat& image_gray) {
    // alternative way is to grab, then retrieve; allows for
    // multiple grab when processing below frame rate - v4l keeps a
    // number of frames buffered, which can lead to significant lag
    //      m_cap.grab();
    //      m_cap.retrieve(image);

    // detect April tags (requires a gray scale image)
    //cv::cvtColor(image, image_gray, CV_BGR2GRAY);
    process_flag = true;
    cout<<"process begin"<<endl;
    double t = tic();
    double t0;
    if (m_timing) {
      t0 = tic();
    }
    vector<AprilTags::TagDetection> detections = m_tagDetector->extractTags(image_gray);
    if (m_timing) {
      double dt = tic()-t0;
      cout << "Extracting tags took " << dt << " seconds." << endl;
    }

    // print out each detection
    cout << detections.size() << " tags detected:" << endl;
    for (int i=0; i<detections.size(); i++) {
      print_detection(detections[i]);
    }

    // show the current image including any detections
    if (m_draw) {
      for (int i=0; i<detections.size(); i++) {
        // also highlight in the image
        detections[i].draw(image_gray);
      }
      imshow(windowName, image_gray); // OpenCV call
    }

    // optionally send tag information to serial port (e.g. to Arduino)
    if (m_arduino) {
      if (detections.size() > 0) {
        // only the first detected tag is sent out for now
        Eigen::Vector3d translation;
        Eigen::Matrix3d rotation;
        detections[0].getRelativeTranslationRotation(m_tagSize, m_fx, m_fy, m_px, m_py,
                                                     translation, rotation);
        m_serial.print(detections[0].id);
        m_serial.print(",");
        m_serial.print(translation(0));
        m_serial.print(",");
        m_serial.print(translation(1));
        m_serial.print(",");
        m_serial.print(translation(2));
        m_serial.print("\n");
      } else {
        // no tag detected: tag ID = -1
        m_serial.print("-1,0.0,0.0,0.0\n");
      }
    }
    cout<<"process end:"<<endl;
    process_flag = false;
  }
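The serial output above is one comma-separated line per frame, "id,x,y,z\n", with id = -1 when no tag is detected. As a complement, here is a minimal sketch of parsing such a line on the receiving side; the function and variable names are illustrative and not part of the example.

  #include <cstdio>
  #include <string>

  // Parse one line of the form "id,x,y,z"; returns false on malformed input.
  // An id of -1 means "no tag detected this frame".
  bool parseTagLine(const std::string& line, int& id, double& x, double& y, double& z) {
    return std::sscanf(line.c_str(), "%d,%lf,%lf,%lf", &id, &x, &y, &z) == 4;
  }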