void PatternDetector::buildPatternFromImage(const cv::Mat& image, Pattern& pattern) const
{
    // Store original image in pattern structure
    pattern.size = cv::Size(image.cols, image.rows);
    pattern.frame = image.clone();
    getGray(image, pattern.grayImg);
    
    // Build the 2D and 3D contours (the 3D contour lies in the XY plane since the pattern is planar)
    pattern.points2d.resize(4);
    pattern.points3d.resize(4);

    // Image dimensions
    const float w = image.cols;
    const float h = image.rows;

    // Normalized dimensions:
    const float maxSize = std::max(w,h);
    const float unitW = w / maxSize;
    const float unitH = h / maxSize;

    pattern.points2d[0] = cv::Point2f(0,0);
    pattern.points2d[1] = cv::Point2f(w,0);
    pattern.points2d[2] = cv::Point2f(w,h);
    pattern.points2d[3] = cv::Point2f(0,h);

    pattern.points3d[0] = cv::Point3f(-unitW, -unitH, 0);
    pattern.points3d[1] = cv::Point3f( unitW, -unitH, 0);
    pattern.points3d[2] = cv::Point3f( unitW,  unitH, 0);
    pattern.points3d[3] = cv::Point3f(-unitW,  unitH, 0);

    extractFeatures(pattern.grayImg, pattern.keypoints, pattern.descriptors);
}
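Several of the OpenCV sample snippets in this listing call a two-argument getGray(input, gray) helper that is not itself shown. A minimal sketch of what such a helper typically looks like with the OpenCV 2.x API; the exact channel handling is an assumption:
#include <opencv2/imgproc/imgproc.hpp>

// Sketch of the assumed getGray(input, gray) helper: converts a BGR/BGRA frame
// to a single-channel 8-bit image, or just references it if already grayscale.
static void getGray(const cv::Mat& input, cv::Mat& gray)
{
    const int numChannels = input.channels();

    if (numChannels == 4)
        cv::cvtColor(input, gray, CV_BGRA2GRAY);
    else if (numChannels == 3)
        cv::cvtColor(input, gray, CV_BGR2GRAY);
    else if (numChannels == 1)
        gray = input;
}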
//! Computes the integral image of the source image. The source is first
//! converted to a single-channel 32-bit float image; the returned IplImage
//! is also 32-bit float.
IplImage *Integral(IplImage *source) {
	// convert the image to single channel 32f
	IplImage *img = getGray(source);
	IplImage *int_img = cvCreateImage(cvGetSize(img), IPL_DEPTH_32F, 1);

	// set up variables for data access
	int height = img->height;
	int width = img->width;
	int step = img->widthStep / sizeof(float);
	float *data = (float *) img->imageData;
	float *i_data = (float *) int_img->imageData;

	// first row only
	float rs = 0.0f;
	for (int j = 0; j < width; j++) {
		rs += data[j];
		i_data[j] = rs;
	}

	// remaining cells are sum above and to the left
	for (int i = 1; i < height; ++i) {
		rs = 0.0f;
		for (int j = 0; j < width; ++j) {
			rs += data[i * step + j];
			i_data[i * step + j] = rs + i_data[(i - 1) * step + j];
		}
	}

	// release the gray image
	cvReleaseImage(&img);

	// return the integral image
	return int_img;
}
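The integral image produced above is normally consumed through constant-time box sums. A sketch of such a lookup in the usual OpenSURF style; the BoxIntegral name and the exact clamping details are assumptions, not part of the code above:
#include <algorithm>
#include <opencv2/core/core_c.h>

// Sum of the pixels inside the rectangle with top-left corner (row, col) and
// size rows x cols, computed in O(1) from the integral image using its four
// corner values A (top-left), B (top-right), C (bottom-left), D (bottom-right).
float BoxIntegral(IplImage *int_img, int row, int col, int rows, int cols)
{
	float *data = (float *) int_img->imageData;
	int step = int_img->widthStep / sizeof(float);

	// clamp the corners to the image boundary
	int r1 = std::min(row,        int_img->height) - 1;
	int c1 = std::min(col,        int_img->width)  - 1;
	int r2 = std::min(row + rows, int_img->height) - 1;
	int c2 = std::min(col + cols, int_img->width)  - 1;

	float A = (r1 >= 0 && c1 >= 0) ? data[r1 * step + c1] : 0.0f;
	float B = (r1 >= 0 && c2 >= 0) ? data[r1 * step + c2] : 0.0f;
	float C = (r2 >= 0 && c1 >= 0) ? data[r2 * step + c1] : 0.0f;
	float D = (r2 >= 0 && c2 >= 0) ? data[r2 * step + c2] : 0.0f;

	return std::max(0.0f, D - B - C + A);
}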
Example No. 3
//! Processes a frame and returns output image 
bool EdgeDetectionSample::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
  getGray(inputFrame, grayImage);
  
  cv::Canny(grayImage, edges, 50, 150);
  
  cv::cvtColor(edges, outputFrame, CV_GRAY2BGRA);
  return true;
}
Example No. 4
/*
=============
getData

Gets the image data for the specified bit depth.
=============
*/
char *getData (FILE *s, int sz, int iBits)
{
    if (iBits == 32)
        return getRGBA (s, sz);
    else if (iBits == 24)
        return getRGB (s, sz);
    else if (iBits == 8)
        return getGray (s, sz);

    // unsupported bit depth
    return NULL;
}
Example No. 5
 vector<vector<int>> imageSmoother(vector<vector<int>>& M) {
     if (M.empty() || M[0].empty())
         return M;
     const int m = M.size(), n = M[0].size();
     vector<vector<int>> result(M);
     for (int i = 0; i < m; ++i) {
         for (int j = 0; j < n; ++j) {
             result[i][j] = getGray(M, i, j);
         }
     }
     return result;
 }
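The smoother delegates the per-cell work to a getGray helper that is not shown. Presumably it returns the floor of the average of the cell and its in-bounds neighbours, as the LeetCode "Image Smoother" problem specifies. A sketch with the argument order of the call above (the second variant further down passes the arguments in a different order):
 // Hypothetical helper assumed by the smoother above: floor of the average of
 // M[i][j] and its in-bounds neighbours in the 3x3 window centered on (i, j).
 int getGray(const vector<vector<int>>& M, int i, int j) {
     const int m = M.size(), n = M[0].size();
     int sum = 0, count = 0;
     for (int di = -1; di <= 1; ++di) {
         for (int dj = -1; dj <= 1; ++dj) {
             const int ni = i + di, nj = j + dj;
             if (ni >= 0 && ni < m && nj >= 0 && nj < n) {
                 sum += M[ni][nj];
                 ++count;
             }
         }
     }
     return sum / count;
 }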
Example No. 6
//! Processes a frame and returns output image 
bool ObjectTrackingSample::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
    // display the frame
    inputFrame.copyTo(outputFrame);
    
    // convert input frame to gray scale
    getGray(inputFrame, imageNext);
    
    // prepare the tracking class
    ObjectTrackingClass ot;
    ot.setMaxCorners(m_maxCorners);
    
    // begin tracking object
    if ( trackObject ) {
        ot.track(outputFrame,
                 imagePrev,
                 imageNext,
                 pointsPrev,
                 pointsNext,
                 status,
                 err);
        
        // check if the next points array isn't empty
        if ( pointsNext.empty() )
            trackObject = false;
    }
       
    // store the reference frame as the object to track
    if ( computeObject ) {
        ot.init(outputFrame, imagePrev, pointsNext);
        trackObject = true;
        computeObject = false;
    }
    
    // (test) drawing of custom points
    size_t i;
    for( i = 0; i < customPoints.size(); i++ )
    {        
        cv::circle( outputFrame, customPoints[i], 3, cv::Scalar(255,255,0), -1, 8);
    }
    
    // backup previous frame
    imageNext.copyTo(imagePrev);
    
    // backup points array
    std::swap(pointsNext, pointsPrev);
    
    return true;
}
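ObjectTrackingClass is not part of this listing. A minimal sketch of the kind of work its track() call presumably wraps, written here as a free function: pyramidal Lucas-Kanade optical flow from the previous gray frame to the current one, keeping and drawing only the successfully tracked points. Only the parameter list follows the call site above; the body and drawing details are assumptions:
#include <vector>
#include <opencv2/opencv.hpp>

static void trackSketch(cv::Mat& image,
                        const cv::Mat& imagePrev,
                        const cv::Mat& imageNext,
                        std::vector<cv::Point2f>& pointsPrev,
                        std::vector<cv::Point2f>& pointsNext,
                        std::vector<uchar>& status,
                        std::vector<float>& err)
{
    if (pointsPrev.empty() || imagePrev.empty())
        return;

    // sparse optical flow from the previous to the current gray frame
    cv::calcOpticalFlowPyrLK(imagePrev, imageNext, pointsPrev, pointsNext, status, err);

    // keep only the points that were tracked successfully and draw them
    size_t k = 0;
    for (size_t i = 0; i < pointsNext.size(); i++)
    {
        if (!status[i])
            continue;
        cv::line(image, pointsPrev[i], pointsNext[i], CV_RGB(0, 255, 0));
        cv::circle(image, pointsNext[i], 3, CV_RGB(0, 255, 0), -1, 8);
        pointsNext[k++] = pointsNext[i];
    }
    pointsNext.resize(k);
}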
 vector<vector<int>> imageSmoother(vector<vector<int>>& M) {
     int m = M.size();
     if (m == 0)
         return {};
     int n = M[0].size();
     if (n == 0)
         return {};
     vector<vector<int>> res = M;
     for (int i = 0; i < m; i++) {
         for (int j = 0; j < n; j++) {
             res[i][j] = getGray(i, j, M);
         }
     }
     return res;
 }
//! Processes a frame and returns output image
bool ContourDetectorProcessor::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
    getGray(inputFrame, gray);
    
    cv::Mat edges;
    cv::Canny(gray, edges, 50, 150);
    
    std::vector< std::vector<cv::Point> > c;
    
    cv::findContours(edges, c, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    
    inputFrame.copyTo(outputFrame);
    cv::drawContours(outputFrame, c, -1, CV_RGB(0,200,0));
    
    return true;
}
Example No. 9
bool GrayCodeModule::run(){

    if(m_data->currentImage == NULL || m_data->currentImage->isNull()) {
        AppendToLog("GrayCodeModule: Warning: No current image. Aborting execution...\n");
        return false;
    }

    if(first == false) {
        if(m_data->grayImage != NULL)
            delete m_data->grayImage;

        m_data->grayImage = new QImage(m_data->currentImage->width(),
                                       m_data->currentImage->height(),
                                       QImage::Format_ARGB32);
        if(m_data->grayImage->isNull()) {
            AppendToLog("GrayCodeModule: Warning: Cannot create gray image from current image.\n");
            delete m_data->grayImage;
            m_data->grayImage = NULL;
            return false;
        } else
            first = true;
    }

    if(    m_data->currentImage->width() != m_data->grayImage->width()
        || m_data->currentImage->height() != m_data->grayImage->height()) {
        delete m_data->grayImage;
        m_data->grayImage = new QImage(m_data->currentImage->width(),
                                       m_data->currentImage->height(),
                                       QImage::Format_ARGB32);
        if(m_data->grayImage->isNull()) {
            AppendToLog("GrayCodeModule: Warning: Cannot create gray image from current image.\n");
            delete m_data->grayImage;
            m_data->grayImage = NULL;
            return false;
        }
    }

    getGray(m_data->grayImage);

    return true;
}
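The final getGray(m_data->grayImage) call fills the destination image, but its body is not shown in this listing. Assuming the module's name is literal and it stores the binary-reflected Gray code of each pixel's 8-bit luminance (g = v XOR (v >> 1)), a per-pixel sketch could look like this:
#include <QImage>
#include <QColor>

// Hypothetical per-pixel conversion: luminance of each source pixel mapped to
// its binary-reflected Gray code and written to all three destination channels.
void grayCodeSketch(const QImage *source, QImage *dest)
{
    for (int y = 0; y < source->height(); ++y) {
        for (int x = 0; x < source->width(); ++x) {
            const int v = qGray(source->pixel(x, y));   // 0..255 luminance
            const int g = v ^ (v >> 1);                 // binary-reflected Gray code
            dest->setPixel(x, y, qRgb(g, g, g));
        }
    }
}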
//! Processes a frame and returns output image 
bool ObjectTrackingSample::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
    // display the frame
    inputFrame.copyTo(outputFrame);
    
    // convert input frame to gray scale
    getGray(inputFrame, imageNext);
    
    // prepare the tracking class
    ObjectTrackingClass ot;
    ot.setMaxCorners(m_maxCorners);
    
    // begin tracking object
    if ( trackObject ) {
        ot.track(outputFrame,
                 imagePrev,
                 imageNext,
                 pointsPrev,
                 pointsNext,
                 status,
                 err);
        
        // check if the next points array isn't empty
        if ( pointsNext.empty() )
            trackObject = false;
    }
       
    // store the reference frame as the object to track
    if ( computeObject ) {
        ot.init(outputFrame, imagePrev, pointsNext);
        trackObject = true;
        computeObject = false;
    }
    
    // backup previous frame
    imageNext.copyTo(imagePrev);
    
    // backup points array
    std::swap(pointsNext, pointsPrev);
    
    return true;
}
Example No. 11
bool PatternDetector::findPattern(const cv::Mat& image, PatternTrackingInfo& info)
{
    // Convert input image to gray
    getGray(image, m_grayImg);
    
    // Extract feature points from input gray image
    extractFeatures(m_grayImg, m_queryKeypoints, m_queryDescriptors);
    
    // Get matches with current pattern
    getMatches(m_queryDescriptors, m_matches);

#if _DEBUG
    cv::showAndSave("Raw matches", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif

#if _DEBUG
    cv::Mat tmp = image.clone();
#endif

    // Find homography transformation and detect good matches
    bool homographyFound = refineMatchesWithHomography(
        m_queryKeypoints, 
        m_pattern.keypoints, 
        homographyReprojectionThreshold, 
        m_matches, 
        m_roughHomography);

    if (homographyFound)
    {
#if _DEBUG
        cv::showAndSave("Refined matches using RANSAC", getMatchesImage(image, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
#endif
        // If homography refinement is enabled, improve the found transformation
        if (enableHomographyRefinement)
        {
            // Warp image using found homography
            cv::warpPerspective(m_grayImg, m_warpedImg, m_roughHomography, m_pattern.size, cv::WARP_INVERSE_MAP | cv::INTER_CUBIC);
#if _DEBUG
            cv::showAndSave("Warped image",m_warpedImg);
#endif
            // Get refined matches:
            std::vector<cv::KeyPoint> warpedKeypoints;
            std::vector<cv::DMatch> refinedMatches;

            // Detect features on warped image
            extractFeatures(m_warpedImg, warpedKeypoints, m_queryDescriptors);

            // Match with pattern
            getMatches(m_queryDescriptors, refinedMatches);

            // Estimate new refinement homography
            homographyFound = refineMatchesWithHomography(
                warpedKeypoints, 
                m_pattern.keypoints, 
                homographyReprojectionThreshold, 
                refinedMatches, 
                m_refinedHomography);
#if _DEBUG
            cv::showAndSave("MatchesWithRefinedPose", getMatchesImage(m_warpedImg, m_pattern.grayImg, warpedKeypoints, m_pattern.keypoints, refinedMatches, 100));
#endif
            // The final homography is the product of the rough and refined homographies:
            info.homography = m_roughHomography * m_refinedHomography;

            // Transform contour with rough homography
#if _DEBUG
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif

            // Transform contour with precise homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, info.homography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(200,0,0));
#endif
        }
        else
        {
            info.homography = m_roughHomography;

            // Transform contour with rough homography
            cv::perspectiveTransform(m_pattern.points2d, info.points2d, m_roughHomography);
#if _DEBUG
            info.draw2dContour(tmp, CV_RGB(0,200,0));
#endif
        }
    }

#if _DEBUG
    if (1)
    {
        cv::showAndSave("Final matches", getMatchesImage(tmp, m_pattern.frame, m_queryKeypoints, m_pattern.keypoints, m_matches, 100));
    }
    std::cout << "Features:" << std::setw(4) << m_queryKeypoints.size() << " Matches: " << std::setw(4) << m_matches.size() << std::endl;
#endif

    return homographyFound;
}
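Taken together, buildPatternFromImage() and findPattern() form the usual planar-marker pipeline. A minimal, hypothetical driver loop; the train() call, the file name, and the window handling are assumptions for illustration only:
#include <opencv2/opencv.hpp>

int main()
{
    // Build the pattern once from a reference marker image (path is illustrative)
    cv::Mat marker = cv::imread("pattern.png");
    PatternDetector detector;
    Pattern pattern;
    detector.buildPatternFromImage(marker, pattern);
    detector.train(pattern);   // assumed: loads the pattern descriptors into the matcher

    // Detect the pattern in every camera frame
    cv::VideoCapture capture(0);
    cv::Mat frame;
    PatternTrackingInfo info;
    while (capture.read(frame))
    {
        if (detector.findPattern(frame, info))
            info.draw2dContour(frame, CV_RGB(0, 200, 0));

        cv::imshow("Pattern detection", frame);
        if (cv::waitKey(1) == 27)   // ESC quits
            break;
    }
    return 0;
}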
Example No. 12
void CGrayColor::print(std::ostream &os) const {
  os << "CGrayColor(" << getGray() << ")";
}
//! Sets the reference frame for later processing
void ObjectTrackingSample::setReferenceFrame(const cv::Mat& reference)
{
    getGray(reference, imagePrev);
    computeObject = true;
}
Example No. 14
/*!
    Saves integral Image in d_intImage on the GPU
    \param source Input Image as grabbed by OpenCv
*/
void Surf::computeIntegralImage(IplImage* source)
{
    //! convert the image to single channel 32f

    // TODO This call takes about 4ms (is there any way to speed it up?)
    IplImage *img = getGray(source);

    // set up variables for data access
    int height = img->height;
    int width = img->width;
    float *data = (float*)img->imageData;

    cl_kernel scan_kernel;
    cl_kernel transpose_kernel;

    if(isUsingImages()) {
        // Copy the data to the GPU
        cl_copyImageToDevice(this->d_intImage, data, height, width);

        scan_kernel = this->kernel_list[KERNEL_SCANIMAGE];
        transpose_kernel = this->kernel_list[KERNEL_TRANSPOSEIMAGE];
    }
    else {
        // Copy the data to the GPU
        cl_copyBufferToDevice(this->d_intImage, data, sizeof(float)*width*height);

        // If it is possible to use the vector scan (scan4) use
        // it, otherwise, use the regular scan
        if(cl_deviceIsAMD() && width % 4 == 0 && height % 4 == 0) 
        {
            // NOTE Change this to KERNEL_SCAN when running verification code.
            //      The reference code doesn't use a vector type and
            //      scan4 produces a slightly different integral image
            scan_kernel = this->kernel_list[KERNEL_SCAN4];
        }
        else 
        {
            scan_kernel = this->kernel_list[KERNEL_SCAN];
        }
        transpose_kernel = this->kernel_list[KERNEL_TRANSPOSE];
    }
    

    // -----------------------------------------------------------------
    // Step 1: Perform integral summation on the rows
    // -----------------------------------------------------------------

    size_t localWorkSize1[2]={64, 1};
    size_t globalWorkSize1[2]={64, height};

    cl_setKernelArg(scan_kernel, 0, sizeof(cl_mem), (void *)&(this->d_intImage));
    cl_setKernelArg(scan_kernel, 1, sizeof(cl_mem), (void *)&(this->d_tmpIntImage)); 
    cl_setKernelArg(scan_kernel, 2, sizeof(int), (void *)&height);
    cl_setKernelArg(scan_kernel, 3, sizeof(int), (void *)&width);

    cl_executeKernel(scan_kernel, 2, globalWorkSize1, localWorkSize1, "Scan", 0);

    // -----------------------------------------------------------------
    // Step 2: Transpose
    // -----------------------------------------------------------------

    size_t localWorkSize2[]={16, 16};
    size_t globalWorkSize2[]={roundUp(width,16), roundUp(height,16)};

    cl_setKernelArg(transpose_kernel, 0, sizeof(cl_mem), (void *)&(this->d_tmpIntImage));  
    cl_setKernelArg(transpose_kernel, 1, sizeof(cl_mem), (void *)&(this->d_tmpIntImageT1)); 
    cl_setKernelArg(transpose_kernel, 2, sizeof(int), (void *)&height);
    cl_setKernelArg(transpose_kernel, 3, sizeof(int), (void *)&width);

    cl_executeKernel(transpose_kernel, 2, globalWorkSize2, localWorkSize2, "Transpose", 0);

    // -----------------------------------------------------------------
    // Step 3: Run integral summation on the rows again (same as columns
    //         integral since we've transposed). 
    // -----------------------------------------------------------------

    int heightT = width;
    int widthT = height;

    size_t localWorkSize3[2]={64, 1};
    size_t globalWorkSize3[2]={64, heightT};

    cl_setKernelArg(scan_kernel, 0, sizeof(cl_mem), (void *)&(this->d_tmpIntImageT1));
    cl_setKernelArg(scan_kernel, 1, sizeof(cl_mem), (void *)&(this->d_tmpIntImageT2)); 
    cl_setKernelArg(scan_kernel, 2, sizeof(int), (void *)&heightT);
    cl_setKernelArg(scan_kernel, 3, sizeof(int), (void *)&widthT);

    cl_executeKernel(scan_kernel, 2, globalWorkSize3, localWorkSize3, "Scan", 1);

    // -----------------------------------------------------------------
    // Step 4: Transpose back
    // -----------------------------------------------------------------

    size_t localWorkSize4[]={16, 16};
    size_t globalWorkSize4[]={roundUp(widthT,16), roundUp(heightT,16)};

    cl_setKernelArg(transpose_kernel, 0, sizeof(cl_mem), (void *)&(this->d_tmpIntImageT2)); 
    cl_setKernelArg(transpose_kernel, 1, sizeof(cl_mem), (void *)&(this->d_intImage));
    cl_setKernelArg(transpose_kernel, 2, sizeof(int), (void *)&heightT);
    cl_setKernelArg(transpose_kernel, 3, sizeof(int), (void *)&widthT);

    cl_executeKernel(transpose_kernel, 2, globalWorkSize4, localWorkSize4, "Transpose", 1);

    // release the gray image
    cvReleaseImage(&img);
}
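The global work sizes above are padded with a roundUp() helper that is not shown; a minimal sketch under the obvious assumption that it rounds a dimension up to the next multiple of the work-group size:
#include <cstddef>

// Hypothetical helper matching the call sites above: rounds value up to the
// nearest multiple of 'multiple', so global work sizes divide evenly into
// the 16x16 work-groups.
static size_t roundUp(size_t value, size_t multiple)
{
    size_t remainder = value % multiple;
    return (remainder == 0) ? value : value + (multiple - remainder);
}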
Example No. 15
//! Processes a frame and returns output image
bool VideoTrackingSample::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
    inputFrame.copyTo(outputFrame);
    
    getGray(inputFrame, m_nextImg);
    
    if (m_activeTrackingAlgorithm == TrackingAlgorithmKLT)
    {
        
        if (m_mask.rows != inputFrame.rows || m_mask.cols != inputFrame.cols)
            m_mask.create(inputFrame.rows, inputFrame.cols, CV_8UC1);
        
        if (m_prevPts.size() > 0)
        {
            cv::calcOpticalFlowPyrLK(m_prevImg, m_nextImg, m_prevPts, m_nextPts, m_status, m_error);
        }
        
        m_mask = cv::Scalar(255);
        
        std::vector<cv::Point2f> trackedPts;
        
        for (size_t i=0; i<m_status.size(); i++)
        {
            if (m_status[i])
            {
                trackedPts.push_back(m_nextPts[i]);
                
                cv::circle(m_mask, m_prevPts[i], 15, cv::Scalar(0), CV_FILLED);
                cv::line(outputFrame, m_prevPts[i], m_nextPts[i], CV_RGB(0,250,0));
                cv::circle(outputFrame, m_nextPts[i], 3, CV_RGB(0,250,0), CV_FILLED);
            }
        }
        
        bool needDetectAdditionalPoints = trackedPts.size() < m_maxNumberOfPoints;
        if (needDetectAdditionalPoints)
        {
            m_detector->detect(m_nextImg, m_nextKeypoints, m_mask);
            int pointsToDetect = m_maxNumberOfPoints - trackedPts.size();
            
            if (m_nextKeypoints.size() > static_cast<size_t>(pointsToDetect))
            {
                std::random_shuffle(m_nextKeypoints.begin(), m_nextKeypoints.end());
                m_nextKeypoints.resize(pointsToDetect);
            }
            
            std::cout << "Detected additional " << m_nextKeypoints.size() << " points" << std::endl;
            
            for (size_t i=0; i<m_nextKeypoints.size(); i++)
            {
                trackedPts.push_back(m_nextKeypoints[i].pt);
                cv::circle(outputFrame, m_nextKeypoints[i].pt, 5, cv::Scalar(255,0,255), -1);
            }
        }
        
        m_prevPts = trackedPts;
        m_nextImg.copyTo(m_prevImg);
    }
    else if (m_activeTrackingAlgorithm == TrackingAlgorithmORB)
    {
        m_orbFeatureEngine(m_nextImg, cv::Mat(), m_nextKeypoints, m_nextDescriptors);
        
        if (m_prevKeypoints.size() > 0)
        {
            std::vector< std::vector<cv::DMatch> > matches;
            m_orbMatcher.radiusMatch(m_nextDescriptors, m_prevDescriptors, matches, 10);
            
            for (size_t i=0; i<matches.size(); i++)
            {
                // skip query descriptors that had no match within the search radius
                if (matches[i].empty())
                    continue;
                
                cv::Point prevPt = m_prevKeypoints[matches[i][0].trainIdx].pt;
                cv::Point nextPt = m_nextKeypoints[matches[i][0].queryIdx].pt;
                
                cv::circle(outputFrame, prevPt, 5, cv::Scalar(250,0,250), CV_FILLED);
                cv::line(outputFrame, prevPt, nextPt, CV_RGB(0,250,0));
                cv::circle(outputFrame, nextPt, 3, CV_RGB(0,250,0), CV_FILLED);
            }
        }
        
        m_prevKeypoints.swap(m_nextKeypoints);
        m_nextDescriptors.copyTo(m_prevDescriptors);
    }
    else if(m_activeTrackingAlgorithm == TrackingAlgorithmBRIEF)
    {
        m_fastDetector.detect(m_nextImg, m_nextKeypoints);
        m_briefExtractor.compute(m_nextImg, m_nextKeypoints, m_nextDescriptors);
        
        if (m_prevKeypoints.size() > 0)
        {
            std::vector< std::vector<cv::DMatch> > matches;
            m_orbMatcher.radiusMatch(m_nextDescriptors, m_prevDescriptors, matches, 10);
            
            for (size_t i=0; i<matches.size(); i++)
            {
                // skip query descriptors that had no match within the search radius
                if (matches[i].empty())
                    continue;
                
                cv::Point prevPt = m_prevKeypoints[matches[i][0].trainIdx].pt;
                cv::Point nextPt = m_nextKeypoints[matches[i][0].queryIdx].pt;
                
                cv::circle(outputFrame, prevPt, 5, cv::Scalar(250,0,250), CV_FILLED);
                cv::line(outputFrame, prevPt, nextPt, CV_RGB(0,250,0));
                cv::circle(outputFrame, nextPt, 3, CV_RGB(0,250,0), CV_FILLED);
            }
        }
        
        m_prevKeypoints.swap(m_nextKeypoints);
        m_nextDescriptors.copyTo(m_prevDescriptors);
    }
    
    return true;
}
Example No. 16
std::string Color::toHumanReadableString(PixelFormat pixelFormat, HumanReadableString humanReadable) const
{
  std::stringstream result;

  if (humanReadable == LongHumanReadableString) {
    switch (getType()) {

      case Color::MaskType:
        result << "Mask";
        break;

      case Color::RgbType:
        if (pixelFormat == IMAGE_GRAYSCALE) {
          result << "Gray " << getGray();
        }
        else {
          result << "RGB "
                 << m_value.rgb.r << " "
                 << m_value.rgb.g << " "
                 << m_value.rgb.b;

          if (pixelFormat == IMAGE_INDEXED)
            result << " Index "
                   << color_utils::color_for_image(*this, pixelFormat);
        }
        break;

      case Color::HsvType:
        if (pixelFormat == IMAGE_GRAYSCALE) {
          result << "Gray " << getGray();
        }
        else {
          result << "HSB "
                 << m_value.hsv.h << "\xB0 "
                 << m_value.hsv.s << " "
                 << m_value.hsv.v;

          if (pixelFormat == IMAGE_INDEXED)
            result << " Index " << color_utils::color_for_image(*this, pixelFormat);
        }
        break;

      case Color::GrayType:
        result << "Gray " << m_value.gray;
        break;

      case Color::IndexType: {
        int i = m_value.index;
        if (i >= 0 && i < (int)get_current_palette()->size()) {
          uint32_t _c = get_current_palette()->getEntry(i);
          result << "Index " << i
                 << " (RGB "
                 << (int)_rgba_getr(_c) << " "
                 << (int)_rgba_getg(_c) << " "
                 << (int)_rgba_getb(_c) << ")";
        }
        else {
          result << "Index "
                 << i
                 << " (out of range)";
        }
        break;
      }

      default:
        ASSERT(false);
        break;
    }
  }
  else if (humanReadable == ShortHumanReadableString) {
    switch (getType()) {

      case Color::MaskType:
        result << "Mask";
        break;

      case Color::RgbType:
        if (pixelFormat == IMAGE_GRAYSCALE) {
          result << "Gry-" << getGray();
        }
        else {
          result << "#" << std::hex << std::setfill('0')
                 << std::setw(2) << m_value.rgb.r
                 << std::setw(2) << m_value.rgb.g
                 << std::setw(2) << m_value.rgb.b;
        }
        break;

      case Color::HsvType:
        if (pixelFormat == IMAGE_GRAYSCALE) {
          result << "Gry-" << getGray();
        }
        else {
          result << m_value.hsv.h << "\xB0"
                 << m_value.hsv.s << ","
                 << m_value.hsv.v;
        }
        break;

      case Color::GrayType:
        result << "Gry-" << m_value.gray;
        break;

      case Color::IndexType:
        result << "Idx-" << m_value.index;
        break;

      default:
        ASSERT(false);
        break;
    }
  }

  return result.str();
}
Example No. 17
//! Processes a frame and returns output image 
bool FeatureDetectionSample::processFrame(const cv::Mat& inputFrame, cv::Mat& outputFrame)
{
    // display the frame
    inputFrame.copyTo(outputFrame);
    
    // try to find the object in the scene
    if (detectObject) {
        
        // convert input frame to gray scale
        getGray(inputFrame, grayImage);
        
        // prepare the robust matcher and set parameters
        FeatureDetectionClass rmatcher;
        rmatcher.setConfidenceLevel(0.98);
        rmatcher.setMinDistanceToEpipolar(1.0);
        rmatcher.setRatio(0.65f);
        
        // feature detector setup
        if (m_fdAlgorithmName == "SURF")
        {
            // prepare keypoints detector
            cv::Ptr<cv::FeatureDetector> detector = new cv::SurfFeatureDetector(m_hessianThreshold);
            rmatcher.setFeatureDetector(detector);
        }
        else if (m_fdAlgorithmName == "ORB")
        {
            // prepare feature detector and detect the object keypoints
            cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(m_nFeatures);
            rmatcher.setFeatureDetector(detector);
        }
        else
        {
            std::cerr << "Unsupported algorithm:" << m_fdAlgorithmName << std::endl;
            assert(false);
        }
            
        // feature extractor and matcher setup
        if (m_feAlgorithmName == "SURF")
        {
            // prepare feature extractor
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::SurfDescriptorExtractor;
            rmatcher.setDescriptorExtractor(extractor);
            // prepare the appropriate matcher for SURF 
            cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_L2, false);
            rmatcher.setDescriptorMatcher(matcher);
            
        } else if (m_feAlgorithmName == "ORB")
        {
            // prepare feature extractor
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
            rmatcher.setDescriptorExtractor(extractor);
            // prepare the appropriate matcher for ORB
            cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, false);
            rmatcher.setDescriptorMatcher(matcher);
            
        } else if (m_feAlgorithmName == "FREAK")
        {
            // prepare feature extractor
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK;
            rmatcher.setDescriptorExtractor(extractor);
            // prepare the appropriate matcher for FREAK
            cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BFMatcher(cv::NORM_HAMMING, false);
            rmatcher.setDescriptorMatcher(matcher);
        }
        else {
            std::cerr << "Unsupported algorithm:" << m_feAlgorithmName << std::endl;
            assert(false);
        }
        
        // call the RobustMatcher to match the object keypoints with the scene keypoints
        std::vector<cv::Point2f> objectKeypoints2f, sceneKeypoints2f;
        std::vector<cv::DMatch> matches;
        cv::Mat fundamentalMat = rmatcher.match(grayImage, // input scene image
                                                objectKeypoints, // input computed object image keypoints
                                                objectDescriptors, // input computed object image descriptors
                                                matches, // output matches
                                                objectKeypoints2f, // output object keypoints (Point2f)
                                                sceneKeypoints2f); // output scene keypoints (Point2f)
        
        if ( matches.size() >= m_minMatches ) {
            
            // draw perspective lines (box the object in the frame)
            if (m_drawPerspective)
                rmatcher.drawPerspective(outputFrame,
                                         objectImage,
                                         objectKeypoints2f,
                                         sceneKeypoints2f);
            
            // draw keypoint matches as yellow points on the output frame
            if (m_drawMatches)
                rmatcher.drawMatches(outputFrame, 
                                     matches,
                                     sceneKeypoints2f);
            
            // draw epipolar lines
            if (m_drawEpipolarLines)
                rmatcher.drawEpipolarLines(outputFrame,
                                           objectImage,
                                           grayImage,
                                           objectKeypoints2f,
                                           sceneKeypoints2f, 1);
            
            // draw custom points
            if (!customPoints.empty())
            {
                rmatcher.drawCustomPoints(outputFrame,
                                          objectKeypoints2f,
                                          sceneKeypoints2f,
                                          customPoints);
            }
        }
    }
    
    // compute object image keypoints and descriptors
    if (computeObject) {
        
        // select feature detection mechanism
        if ( m_fdAlgorithmName == "SURF" )
        {
            // prepare keypoints detector
            cv::Ptr<cv::FeatureDetector> detector = new cv::SurfFeatureDetector(m_hessianThreshold);
            // Compute object keypoints
            detector->detect(objectImage,objectKeypoints);
            
        }
        else if ( m_fdAlgorithmName == "ORB" )
        {
            // prepare feature detector and detect the object keypoints
            cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(m_nFeatures);
            // Compute object keypoints
            detector->detect(objectImage,objectKeypoints);
        }
        else {
            std::cerr << "Unsupported algorithm:" << m_fdAlgorithmName << std::endl;
            assert(false);
        }
        
        // select feature extraction mechanism
        if ( m_feAlgorithmName == "SURF" )
        {
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::SurfDescriptorExtractor;
            // Compute object feature descriptors
            extractor->compute(objectImage,objectKeypoints,objectDescriptors);
        }
        else if ( m_feAlgorithmName == "ORB" )
        {
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
            // Compute object feature descriptors
            extractor->compute(objectImage,objectKeypoints,objectDescriptors);
        }
        else if ( m_feAlgorithmName == "FREAK" )
        {
            cv::Ptr<cv::DescriptorExtractor> extractor = new cv::FREAK;
            // Compute object feature descriptors
            extractor->compute(objectImage,objectKeypoints,objectDescriptors);
        }
        else {
            std::cerr << "Unsupported algorithm:" << m_feAlgorithmName << std::endl;
            assert(false);
        }
        
        // set flags
        computeObject = false;
        detectObject = true;
    }
    return true;
}
Example No. 18
void doProcessing() {
    //cv::namedWindow(face_window_name,CV_WINDOW_NORMAL); cv::moveWindow(face_window_name, 10, 100);
    if (debug_show_img_d1 == true) {
        cv::namedWindow("debug1",CV_WINDOW_NORMAL); cv::moveWindow("debug1", 60, 220);
        cv::namedWindow("debug2",CV_WINDOW_NORMAL); cv::moveWindow("debug2", 60, 490);
        cv::namedWindow("debug3",CV_WINDOW_NORMAL); cv::moveWindow("debug3", 60, 790);
        cv::namedWindow("debug4",CV_WINDOW_NORMAL); cv::moveWindow("debug4", 60, 30);
    }
    if (debug_show_img_main == true) {
        cv::namedWindow("main",CV_WINDOW_NORMAL); cv::moveWindow("main", 400, 30); cv::resizeWindow("main",1280, 960);
    }
    if (debug_show_img_gray == true) {
        cv::namedWindow("gray",CV_WINDOW_NORMAL); cv::moveWindow("gray", 400, 100); cv::resizeWindow("gray",1280, 960);
    }
    if (debug_show_img_face == true) {
        cv::namedWindow("face",CV_WINDOW_NORMAL); cv::moveWindow("face", 60, 30);
    }
    if (debug_show_img_farne_eyes == true && debug_show_img_main == true && (method == METHOD_FARNEBACK || method == METHOD_BLACKPIXELS)) {
        cv::namedWindow("leftR",CV_WINDOW_NORMAL); cv::moveWindow("leftR", 1300, 800);
        cv::namedWindow("rightR",CV_WINDOW_NORMAL); cv::moveWindow("rightR", 1600, 800);
        cv::namedWindow("left",CV_WINDOW_NORMAL); cv::moveWindow("left", 1300, 500);
        cv::namedWindow("right",CV_WINDOW_NORMAL); cv::moveWindow("right", 1600, 500);
        cv::namedWindow("leftSR",CV_WINDOW_NORMAL); cv::moveWindow("leftSR", 1300, 200);
        cv::namedWindow("rightSR",CV_WINDOW_NORMAL); cv::moveWindow("rightSR", 1600, 200);
    }
    if (debug_show_img_templ_eyes_tmpl == true && method == METHOD_TEMPLATE_BASED) {
        cv::namedWindow("leftSR",CV_WINDOW_NORMAL); cv::moveWindow("leftSR", 1300, 200);
        cv::namedWindow("rightSR",CV_WINDOW_NORMAL); cv::moveWindow("rightSR", 1600, 200);
    }
    if (debug_show_img_templ_eyes_tmpl == true && method == METHOD_TEMPLATE_BASED) {
        cv::namedWindow("left",CV_WINDOW_NORMAL); cv::moveWindow("left", 1300, 500);
        cv::namedWindow("right",CV_WINDOW_NORMAL); cv::moveWindow("right", 1600, 500);
    }
    if (debug_show_img_templ_eyes_cor == true && method == METHOD_TEMPLATE_BASED) {
        cv::namedWindow("leftR",CV_WINDOW_NORMAL); cv::moveWindow("leftR", 1300, 800);
        cv::namedWindow("rightR",CV_WINDOW_NORMAL); cv::moveWindow("rightR", 1600, 800);
    }
    /*
    cv::namedWindow("leftR1",CV_WINDOW_NORMAL); cv::moveWindow("leftR1", 10, 800);
    cv::namedWindow("rightR1",CV_WINDOW_NORMAL); cv::moveWindow("rightR1", 200, 800);
    */
    // cv::namedWindow("Right Eye",CV_WINDOW_NORMAL); cv::moveWindow("Right Eye", 10, 600);
    // cv::namedWindow("Left Eye",CV_WINDOW_NORMAL); cv::moveWindow("Left Eye", 10, 800);
    // createCornerKernels(), at the end // releaseCornerKernels(); // ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2), 43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

    std::chrono::time_point<std::chrono::steady_clock> t1 = std::chrono::steady_clock::now();
    std::chrono::time_point<std::chrono::steady_clock> t2;
    cv::Mat frame, gray, cflow;
    unsigned int lastFrameNum = 0;
    double lastTimestamp = 0.0;
    while (true) {
        long unsigned int listSize = frameList.size();
        if (listSize == 0) {
            if (finished == true) {
                break;
            }
            if (canAdd == false) {
                canAdd = true;
            }
            t2 = std::chrono::steady_clock::now();
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            difftime("debug_t2_perf: waiting for frames", t2, debug_t2_perf);
            continue;
        }

        FrameCarrier fc = frameList.front();
        lastFrameNum = fc.frameNum;
        lastTimestamp = fc.timestamp;
        frameList.pop_front();
        frame = fc.frame;
        // cv::flip(frame, frame, 1);
        double timestamp = fc.timestamp;
        doLog(debug_t2_log, "debug_t2_log: frame time: %lf\n", timestamp);

        t2 = std::chrono::steady_clock::now();
        switch (method) {
            case METHOD_FARNEBACK:
            case METHOD_BLACKPIXELS:
            case METHOD_OPTFLOW:
            case METHOD_TEMPLATE_BASED:
            getGray(frame, &gray);
            break;
            case METHOD_BLACK_PIXELS:
            break;
        }
        difftime("debug_t2_perf: getGray", t2, debug_t2_perf);

        t2 = std::chrono::steady_clock::now();
        switch (method) {
            case METHOD_OPTFLOW:
            optf.run(gray, frame, timestamp, fc.frameNum);
            break;
            case METHOD_FARNEBACK:
            farneback.run(gray, frame, timestamp, fc.frameNum);
            break;
            case METHOD_BLACKPIXELS:
            blackpixels.run(gray, frame, timestamp, fc.frameNum);
            break;
            case METHOD_TEMPLATE_BASED:
            templ.run(gray, frame, timestamp, fc.frameNum);
            break;
            case METHOD_BLACK_PIXELS:
            break;
        }
        difftime("debug_t2_perf_method:", t2, debug_t2_perf_method);


        t2 = std::chrono::steady_clock::now();
        if (debug_show_img_main == true) {
            // flow control
            int c = cv::waitKey(1);
            if((char)c == 'q') {
                grabbing = false;
                break;
            } else if((char)c == 'p') {
                pauseFrames = 1;
            } else if((char)c == 'f') {
                flg = 1;
            } else if (pauseFrames == 1) {
                while (true) {
                    int c = cv::waitKey(10);
                    if((char)c == 'p') {
                        pauseFrames = 0;
                        break;
                    } else if((char)c == 'i') {
                        imwrite("/tmp/frame.png", toSave);
                    } else if((char)c == 'n') {
                        break;
                    } else if((char)c == 's') {
                        // status
                        printStatus();
                        break;
                    }
                }
            }
        }
        difftime("debug_t2_perf: waitkey", t2, debug_t2_perf);
        difftime("debug_t2_perf_whole:", t1, debug_t2_perf_whole);
        t1 = std::chrono::steady_clock::now();
    }
    // end hook
    switch (method) {
        case METHOD_OPTFLOW:
        break;
        case METHOD_FARNEBACK:
        doLog(debug_fb_log_tracking, "debug_fb_log_tracking: F %u T %.3lf status stop\n", lastFrameNum, lastTimestamp);
        farneback.flushMeasureBlinks();
        break;
        case METHOD_BLACKPIXELS:
        break;
        case METHOD_TEMPLATE_BASED:
        templ.flushMeasureBlinks();
        break;
        case METHOD_BLACK_PIXELS:
        break;
    }
    doLog(true, "exiting\n");
}
Example No. 19
//! Sets the reference frame for later processing
void FeatureDetectionSample::setReferenceFrame(const cv::Mat& reference)
{
    getGray(reference, objectImage);
    computeObject = true;
}