/*
 * Find the Playground in an image and calculate its extrinsics.
 * Returns true and fills `playground` when exactly four L-shaped
 * corner markers could be combined into a rectangle; otherwise
 * leaves the playground marked invalid (id == -1) and returns false.
 */
bool PlaygroundDetector::detect(const cv::Mat &image, Playground &playground, Contours &candidateContours, aruco::CameraParameters &cameraParameters)
{
  // Assume failure until the full pipeline succeeds.
  playground.id = -1;

  // All contours of the color-thresholded input image.
  Contours contours;
  findContours(image, contours);

  // Keep only the L-shaped candidates (playground corner markers).
  filterContours(contours, candidateContours);
  if (candidateContours.size() < 4)
    return false;

  // Combine exactly 4 L-contours into one rectangle of 4 corners.
  std::vector<cv::Point2f> corners; // holds at most 4 points
  extractPlayGroundCorners(candidateContours, corners);
  if (corners.size() != 4)
    return false;

  // Copy the detected corners into the playground.
  playground.resize(4);
  for (std::size_t i = 0; i < 4; ++i)
    playground[i] = corners[i];

  // Mark the playground valid.
  playground.id = 0;

  // Derive translation and rotation from the corners.
  playground.calculateExtrinsics(cameraParameters);

  return true;
}
// Example #2
/**
 * Find and isolate the digits of the counter.
 *
 * Detects edges, extracts contours, filters them by bounding-box size,
 * keeps the largest group of boxes aligned at the same y position,
 * sorts that group left to right and appends the corresponding ROIs of
 * the edge image to _digits.
 */
void ImageProcessor::findCounterDigits() {
    log4cpp::Category& rlog = log4cpp::Category::getRoot();

    // edge image
    cv::Mat edges = cannyEdges();
    if (_debugEdges) {
        cv::imshow("edges", edges);
    }

    // keep a pristine copy: cv::findContours modifies its input image
    cv::Mat img_ret = edges.clone();

    // find contours in whole image
    std::vector<std::vector<cv::Point> > contours, filteredContours;
    std::vector<cv::Rect> boundingBoxes;
    cv::findContours(edges, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
    // filter contours by bounding rect size
    filterContours(contours, boundingBoxes, filteredContours);

    rlog << log4cpp::Priority::INFO << "number of filtered contours: " << filteredContours.size();

    // find the largest set of bounding boxes aligned at the same y position
    std::vector<cv::Rect> alignedBoundingBoxes, tmpRes;
    for (std::vector<cv::Rect>::const_iterator ib = boundingBoxes.begin(); ib != boundingBoxes.end(); ++ib) {
        tmpRes.clear();
        findAlignedBoxes(ib, boundingBoxes.end(), tmpRes);
        if (tmpRes.size() > alignedBoundingBoxes.size()) {
            alignedBoundingBoxes = tmpRes;
        }
    }
    rlog << log4cpp::Priority::INFO << "max number of alignedBoxes: " << alignedBoundingBoxes.size();

    // sort bounding boxes from left to right
    std::sort(alignedBoundingBoxes.begin(), alignedBoundingBoxes.end(), sortRectByX());

    if (_debugEdges) {
        // draw contours
        cv::Mat cont = cv::Mat::zeros(edges.rows, edges.cols, CV_8UC1);
        cv::drawContours(cont, filteredContours, -1, cv::Scalar(255));
        cv::imshow("contours", cont);
    }

    // cut out found rectangles from edged image
    // (size_t index avoids the signed/unsigned comparison of the original loop)
    for (std::size_t i = 0; i < alignedBoundingBoxes.size(); ++i) {
        cv::Rect roi = alignedBoundingBoxes[i];
        _digits.push_back(img_ret(roi));
        if (_debugDigits) {
            cv::rectangle(_img, roi, cv::Scalar(0, 255, 0), 2);
        }
    }
}
std::vector< std::vector<cv::Point> > CMinSquareRecognizing::getContours()
{
    try {
        cv::Mat gray;

        cv::cvtColor( image, gray, CV_RGB2GRAY );
        cv::threshold( gray, gray, 128, 255, CV_THRESH_BINARY );

        std::vector< std::vector<cv::Point> > contours;
        std::vector<cv::Vec4i> hierarchy;

        cv::Mat contourOutput = gray.clone();
        cv::findContours( contourOutput, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE );

        std::cout << contours.size() << std::endl;

        std::vector< std::vector<cv::Point> > filteredContours = filterContours( contours );
        std::cout << filteredContours.size() << std::endl;

        cv::Mat imageWithContours = cv::Mat::zeros( gray.size(), CV_8UC3 );
        for( int i = 0; i < filteredContours.size(); ++i ) {
            cv::drawContours( imageWithContours, filteredContours, i, cv::Scalar( 255, 255, 255 ), 1, 8 );
        }

        cv::bitwise_not( imageWithContours, imageWithContours );

        cv::imwrite( imageSavePath, imageWithContours );

        return filteredContours;

    } catch( cv::Exception e ) {
        std::cout << e.msg;
    }

    return std::vector< std::vector<cv::Point> >();
}
/**
 * Detect the retro-reflective upper-goal target in a camera frame.
 *
 * Converts the frame to HSV, blurs it, thresholds it against the
 * (dynamically adjustable) hue bounds, extracts the contours of the
 * resulting blob image and hands them to filterContours() to locate the
 * goal rectangle. Frame timing, target info, annotation info and the
 * status LED are updated as side effects on pFrame / pFrameGrinder.
 * Every 17th filtered frame is saved to JPEG for diagnostics.
 *
 * @param pFrame        frame to process; receives the filtered image,
 *                      target info and annotation updates
 * @param pFrameGrinder owner of the test monitor used for JPEG dumps
 */
void CUpperGoalDetector::detectBlobs(CVideoFrame * pFrame, CFrameGrinder* pFrameGrinder)
{
    try
    {
        // Persist across calls: frame-to-frame timing and HSV filter bounds.
        static struct timespec timeLastCameraFrame = {0};
        static struct timespec timeNow = {0};
        static cv::Scalar lowerBounds = cv::Scalar(79, 0, 150);
        static cv::Scalar upperBounds = cv::Scalar(96, 255, 250);

        cv::Mat img_hsv, img_blur, goal_blob;
        static int iCount = 0;

        int timeSinceLastCameraFrameMilliseconds = (int) CTestMonitor::getDeltaTimeMilliseconds(
                timeLastCameraFrame,
                pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT]);
        timeLastCameraFrame = pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT];

        // RGB is flawed as a way to filter based on color because the brightness
        // is combined with the color info.
        // Not so with HSV, where Hue and Saturation are maintained separately.
        // OpenCV has a handy conversion from RGB to HSV.
        cv::cvtColor(pFrame->m_frame, img_hsv, CV_BGR2HSV);

        cv::GaussianBlur(img_hsv, img_blur, cv::Size(5,5), 1.5);

        // Look for the green hue we are emitting from the LED halo;
        // hue bounds may be tuned at runtime via the settings file.
        if(g_settings.isDynamicSettingsEnabled())
        {
             g_settings.getValueFromFile(CSetting::SETTING_FILTER_HUE_LOWER_BOUND);
             g_settings.getValueFromFile(CSetting::SETTING_FILTER_HUE_UPPER_BOUND);
        }
        if(g_settings.isValueChanged(CSetting::SETTING_FILTER_HUE_LOWER_BOUND))
        {
            lowerBounds = cv::Scalar(g_settings.getSetting(CSetting::SETTING_FILTER_HUE_LOWER_BOUND), 0, 150);
        }
        if(g_settings.isValueChanged(CSetting::SETTING_FILTER_HUE_UPPER_BOUND))
        {
            upperBounds = cv::Scalar(g_settings.getSetting(CSetting::SETTING_FILTER_HUE_UPPER_BOUND), 255, 250);
        }

        // Find the bright response from the retro-reflective tape.
        cv::inRange(img_blur, lowerBounds, upperBounds, goal_blob);
        pFrame->m_filteredFrame = goal_blob.clone();

        // Periodically dump the filtered frame for offline inspection.
        iCount++;
        if ((iCount % 17) == 0)
        {
            pFrameGrinder->m_testMonitor.saveFrameToJpeg(pFrame->m_filteredFrame);
        }

        // Find the contours (goal_blob is a throwaway clone, so the original
        // frame is not overwritten). std::vector replaces the removed
        // cv::vector alias so this builds on OpenCV 3+ as well as 2.x.
        std::vector<std::vector<cv::Point> > goalContours;
        cv::findContours(goal_blob, goalContours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

        CUpperGoalRectangle upperGoalRectangle;
        float upperGoalAzimuthDegrees = 0.0;
        float distanceToUpperGoalInches = 0.0;
        bool isUpperGoalFound = false;
        isUpperGoalFound = filterContours(goalContours, pFrame->m_frame.rows, pFrame->m_frame.cols,
                upperGoalRectangle, upperGoalAzimuthDegrees, distanceToUpperGoalInches);

        CTestMonitor::getTicks(&timeNow);
        int timeLatencyThisCameraFrameMilliseconds = (int) CTestMonitor::getDeltaTimeMilliseconds(
                pFrame->m_timeAddedToQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT],
                timeNow);

        pFrame->m_targetInfo.updateTargetInfo(
                timeSinceLastCameraFrameMilliseconds, timeLatencyThisCameraFrameMilliseconds,
                isUpperGoalFound, upperGoalAzimuthDegrees, distanceToUpperGoalInches, upperGoalRectangle.center.x);

        pFrame->updateAnnotationInfo(upperGoalRectangle);

        m_gpioLed.setGreenLED(isUpperGoalFound, pFrame->m_timeRemovedFromQueue[(int) CVideoFrame::FRAME_QUEUE_WAIT_FOR_BLOB_DETECT]);
    }
    catch (...)
    {
        // Deliberate best-effort: a failure on one frame must not kill the
        // detection loop. NOTE(review): consider logging the exception so
        // persistent failures are visible.
    }
}