Example #1
std::vector<cv::Point> ImageProcessor::decimateVerticies(std::vector<cv::Point> src, int epsilon)
{
    vector<Point> approxCurve;
    approxPolyDP(src, approxCurve, epsilon, true);

    return approxCurve;
}
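A rough illustration (not part of the original example) of what the epsilon argument controls: approximating the same contour with a larger tolerance yields fewer vertices. The input points below are made up.
// Hedged sketch: larger epsilon -> coarser polygon.
std::vector<cv::Point> noisy = { {0,0}, {10,1}, {20,0}, {30,2}, {40,0}, {40,40}, {0,40} };
std::vector<cv::Point> fine, coarse;
cv::approxPolyDP(noisy, fine, 1.0, true);   // small tolerance: the 2-pixel bump near (30,2) likely survives
cv::approxPolyDP(noisy, coarse, 5.0, true); // larger tolerance: the top edge collapses to a straight segment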
Example #2
vector<vector<Point>> Detect::getPolyCurves(Mat& frame)
{
    // First find the contours in the image
    vector<vector<Point>> contours;
    findContours(frame, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Filter contours by area
    vector<vector<Point>> goodContours;

    for (int i = 0; i < contours.size(); ++i) {
        // an arbitrary amount for now
        if (contourArea(contours[i]) > 5000) {
            goodContours.push_back(contours[i]);
        }
    }

    vector<vector<Point>> polyCurves;
    
    // Get poly curves for every contour
    for (int i = 0; i < goodContours.size(); ++i) {
        vector<Point> current;
        approxPolyDP(goodContours[i], current, 2.0, false);

        if(current.size() > 0){
            polyCurves.push_back(current);
        }
    }

    return polyCurves;
}
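The simplified curves returned above can be passed straight to drawContours for inspection; a minimal sketch, assuming 'detector' is a Detect instance and 'frame' is the same binary image handed to getPolyCurves (both names are placeholders, not from the original source):
// Hedged usage sketch: render the polygonal curves on a blank color image.
cv::Mat vis = cv::Mat::zeros(frame.size(), CV_8UC3);
std::vector<std::vector<cv::Point>> curves = detector.getPolyCurves(frame);
cv::drawContours(vis, curves, -1, cv::Scalar(0, 255, 0), 1); // index -1 draws every curve
cv::imshow("poly curves", vis);
cv::waitKey(0);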
Example #3
void trackFilteredObject ( Mat& threshold ) {

  vector <Object> objects;
  Mat temp;
  threshold.copyTo ( temp ) ;
  vector< vector<Point> > contours;
  contours_poly2.clear();

  vector<Vec4i> hierarchy;
  findContours ( temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
  vector < vector<Point> > contours_poly(contours.size());
  if ( hierarchy.size() > 0 ) {

    int numObjects = hierarchy.size();
    // if the number of objects is greater than MAX_NUM_OBJECTS, the filter is too noisy
    if ( numObjects>MAX_NUM_OBJECTS ) return;

    for (int index = 0; index >= 0; index = hierarchy[index][0]) {

      Moments moment = moments((Mat)contours[index]);
      double area = moment.m00;

      if ( area>MIN_OBJECT_AREA ) {
        // approximate this contour and store the simplified polygon
        approxPolyDP ( Mat(contours[index]), contours_poly[index], 40, true );
        contours_poly2.push_back(contours_poly[index]);
      }
    }

    // credits: thanks PowHu for alpha 255, http://stackoverflow.com/questions/15916751/cvscalar-not-displaying-expected-color
    // drawContours ( cameraFeed, contours_poly2, -1, Scalar(94,206,165,255), 5 ) ;
  }
}
void imageProcess::filterWhiteAreas() {

	std::vector<std::vector<cv::Point>> contours; // Vector for storing contour of large white pixels areas
	cv::Mat temp; //Temp Mat to not change the original
	std::vector<cv::Vec4i> hierarchy;

	frame->copyTo(temp);
	//findContours modifies the source image, so we work on a copy to find large clusters of white pixels
	cv::findContours(temp, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
	std::vector<std::vector<cv::Point>> contours_poly(contours.size());
	std::vector<cv::RotatedRect> boundRect(contours.size());

	for (size_t i = 0; i < contours.size(); i++)
	{

		double a = contourArea(contours[i], false);


		if ((a > whiteAreaMaxLimit)) {

			approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 8, true);
			drawContours(*frame, contours_poly, i, cv::Scalar(0, 0, 0), -1, 8, hierarchy, 0, cv::Point(11, 11));
		}

	}
}
Example #5
bool Contour::update()
{
  //if (!ImageNode::update()) return false;
  if (!Node::update()) return false;

  cv::Mat in = getImage("in");
  if (in.empty()) {
    VLOG(2) << name << " in is empty";
    return false;
  }
 
  if (!isDirty(this, 22)) { return true;}
  
  std::vector<std::vector<cv::Point> > contours_orig;
  cv::Mat in8;
  cv::cvtColor(in, in8, CV_RGB2GRAY);
  cv::findContours(in8, contours_orig, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);

  const float eps = getSignal("epsilon"); // max error of approx
  contours0.resize(contours_orig.size());
  for( size_t k = 0; k < contours_orig.size(); k++ )
    //approxPolyDP(cv::Mat(contours_orig[k]), contours0[k], eps, true);
    approxPolyDP((contours_orig[k]), contours0[k], eps, true);

  cv::Mat out = cv::Mat(Config::inst()->getImSize(), MAT_FORMAT_C3, cv::Scalar(0,0,0,255));

  cv::drawContours( out, contours0, -1, cv::Scalar(255,255,255,255));
                //1, CV_AA, hierarchy, std::abs(_levels) );

  setImage("out", out);

  return true;
}
Example #6
vector<Rect> AvatarDetector::findAvatarsBoundRect(const cv::Mat & image, int thresh)
{
    Mat gray = image.clone();
    
    if (gray.channels() == 4) {
        cvtColor(gray, gray, CV_BGRA2GRAY);
    }
    
    Mat thresholdOutput;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    
    threshold( gray, thresholdOutput, thresh, 255, THRESH_BINARY_INV);
    findContours( thresholdOutput, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    
    vector<vector<Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    
    for(int i = 0; i < contours.size(); i++)
    {
        approxPolyDP( cv::Mat(contours[i]), contours_poly[i], 3, true);
        boundRect[i] = boundingRect( cv::Mat(contours_poly[i]));
    }
    
    return boundRect;
    
}
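A hedged follow-up (not part of the original class) showing how the returned rectangles might be visualized; the default-constructed detector, the 'image' variable, and the threshold value 100 are all assumptions for illustration:
// Hypothetical usage: outline each candidate avatar region on a copy of the input.
AvatarDetector detector;
cv::Mat preview = image.clone();
std::vector<cv::Rect> rects = detector.findAvatarsBoundRect(image, 100);
for (const cv::Rect& r : rects)
    cv::rectangle(preview, r, cv::Scalar(0, 255, 0), 2);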
Example #7
	int contours2( int argc, char** argv)
	{
	    cv::CommandLineParser parser(argc, argv, "{help h||}");
	    if (parser.has("help"))
	    {
	        help();
	        return 0;
	    }
	    Mat img = Mat::zeros(w, w, CV_8UC1);
	    //Draw 6 faces
	    for( int i = 0; i < 6; i++ )
	    {
	        int dx = (i%2)*250 - 30;
	        int dy = (i/2)*150;
	        const Scalar white = Scalar(255);
	        const Scalar black = Scalar(0);
	
	        if( i == 0 )
	        {
	            for( int j = 0; j <= 10; j++ )
	            {
	                double angle = (j+5)*CV_PI/21;
	                line(img, Point(cvRound(dx+100+j*10-80*cos(angle)),
	                    cvRound(dy+100-90*sin(angle))),
	                    Point(cvRound(dx+100+j*10-30*cos(angle)),
	                    cvRound(dy+100-30*sin(angle))), white, 1, 8, 0);
	            }
	        }
	
	        ellipse( img, Point(dx+150, dy+100), Size(100,70), 0, 0, 360, white, -1, 8, 0 );
	        ellipse( img, Point(dx+115, dy+70), Size(30,20), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+185, dy+70), Size(30,20), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+115, dy+70), Size(15,15), 0, 0, 360, white, -1, 8, 0 );
	        ellipse( img, Point(dx+185, dy+70), Size(15,15), 0, 0, 360, white, -1, 8, 0 );
	        ellipse( img, Point(dx+115, dy+70), Size(5,5), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+185, dy+70), Size(5,5), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+150, dy+100), Size(10,5), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+150, dy+150), Size(40,10), 0, 0, 360, black, -1, 8, 0 );
	        ellipse( img, Point(dx+27, dy+100), Size(20,35), 0, 0, 360, white, -1, 8, 0 );
	        ellipse( img, Point(dx+273, dy+100), Size(20,35), 0, 0, 360, white, -1, 8, 0 );
	    }
	    //show the faces
	    namedWindow( "image", 1 );
	    imshow( "image", img );
	    //Extract the contours so that
	    vector<vector<Point> > contours0;
	    findContours( img, contours0, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
	
	    contours.resize(contours0.size());
	    for( size_t k = 0; k < contours0.size(); k++ )
	        approxPolyDP(Mat(contours0[k]), contours[k], 3, true);
	
	    namedWindow( "contours", 1 );
	    createTrackbar( "levels+3", "contours", &levels, 7, on_trackbar );
	
	    on_trackbar(0,0);
	    waitKey();
	
	    return 0;
	}
Example #8
void KnnMDMethod::detect(const cv::Mat& input) {
    std::vector<cv::Vec4i> hierarchy;
    cv::Mat estForeground,      //mog2 result
       estBackground,          //mog2 result
       contoursMat,            //tmp required to show all steps
       dilated,                //after dilatation
       tmp,tmpF;
    input.copyTo(tmp);

    _knn->apply(input, estForeground);
    _knn->getBackgroundImage(estBackground);


    display(ConfigManager::VIEW_KNN_BACKGROUND, estBackground);
    display(ConfigManager::VIEW_KNN_FOREGROUND, estForeground);



    dilate(estForeground,dilated, dilateElement);
    display(ConfigManager::VIEW_KNN_DILATATION, dilated);

    dilated.copyTo(contoursMat);
    dilated.copyTo(tmpF);


    std::vector<std::vector<cv::Point> > contours;
    if(TimeManager::getTimeManager().time() % ConfigManager::getConfigManager().get<int>(ConfigManager::MD_DETECTION_STEP) == 0) {
        findContours( contoursMat, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0)  );

        std::vector<cv::Rect> boundRect(contours.size());
        std::vector<std::vector<cv::Point> > contoursPoly(contours.size());

        for( unsigned long i = 0; i< contours.size(); i++  )
        {
            approxPolyDP(cv::Mat(contours[i]),contoursPoly[i],10,false);
            boundRect[i] = boundingRect(cv::Mat(contoursPoly[i]));

            rectangle(tmp, boundRect[i].tl(), boundRect[i].br(), cv::Scalar(0,255,0), 2,8,0);
            std::vector<cv::Rect> found, found_filtered;
            cv::Mat img1 = tmp(boundRect[i]);
            cv::Mat imgF = tmpF(boundRect[i]);
            cv::Mat img, imgFF;
            resize(img1,img,cv::Size((img1.cols/(double)img1.rows)*ConfigManager::getConfigManager().get<int>(ConfigManager::MD_GROUP_SIZE_FIX),ConfigManager::getConfigManager().get<int>(ConfigManager::MD_GROUP_SIZE_FIX)));
            resize(imgF,imgFF,cv::Size((imgF.cols/(double)imgF.rows)*ConfigManager::getConfigManager().get<int>(ConfigManager::MD_GROUP_SIZE_FIX),ConfigManager::getConfigManager().get<int>(ConfigManager::MD_GROUP_SIZE_FIX)));


            if(!(boundRect[i].width > ConfigManager::getConfigManager().get<double>(ConfigManager::MD_GROUP_WINDOW_TRESH) * input.rows || boundRect[i].width > ConfigManager::getConfigManager().get<double>(ConfigManager::MD_GROUP_WINDOW_TRESH) * input.rows)) {
                Group group(img1.cols/(double)img.cols, img1.rows/(double)img.rows, boundRect[i].x, boundRect[i].y, img, imgFF, boundRect[i]);
                DataManager::getDataManager().addGroup(group);
            }
        }
    }

    display(ConfigManager::VIEW_KNN_RESULT, tmp);



}
void MotionDetection::BackGroundDetection(Mat fgMaskMOG, Mat mask, double ScaleFactor)
{

  int min_size = 1, max_size = 10000;
  vector<vector<Point> > contours;
  erode(fgMaskMOG,fgMaskMOG,Mat());
  dilate(fgMaskMOG,fgMaskMOG,Mat());
  findContours( fgMaskMOG, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
  

  // findContours( tempMog, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE,Point(0, 0));
 
  vector<Rect> boundRect( contours.size() );
  vector<vector<Point> > contours_poly( contours.size() );
 
  // double smallest_area  = contourArea( contours[0],false);
  for( int i = 0; i< contours.size(); i++ )
  { 
    approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
    boundRect[i] = boundingRect( Mat(contours_poly[i]) );
    // rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), Scalar( 255,255,255), -1, 8, 0 );
    //drawContours( drawing, contours, i, Scalar(255,255,255), CV_FILLED, 8, hierarchy);      
  }
  
  Mat drawing = Mat::zeros( fgMaskMOG.size(), CV_8UC1);
  for( int i = 0; i< contours.size(); i++ )
  {
    //need to make sure what's the exact min_size and max_size
    if(boundRect[i].area() < min_size * ScaleFactor * ScaleFactor 
      || boundRect[i].area() > max_size * ScaleFactor * ScaleFactor)
    {
      continue;
    }
    rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(255,255,255), -1, 8, 0);
  }
  resize(drawing,
         drawing,
         Size(drawing.cols / ScaleFactor, drawing.rows / ScaleFactor),
         0,
         0,
         INTER_NEAREST);
  
  if(option_str == "-b")
  {
    static int counter = 0;
    String output_str = outPut_Mask_Path + NumberToString(++counter) + ".png";
    imwrite(output_str, drawing);
  }
  for(int i = 0; i < drawing.cols; i++)
  {
    for(int j = 0; j < drawing.rows; j++)
    { 
        Point p(i,j);
        if(drawing.at<uchar>(p) == 255)
        {  
          mask.at<uchar>(p) += mask_add_step;
        }
    }
  }
}
void drawCroppedImage(Mat& sourceImg, vector<Point> ROI){
	int max_x = 0;
	int max_y = 0;
	int min_x = 99999;
	int min_y = 99999;
	
	// Create a single-channel mask initialized to zero (avoids mixing the legacy cvCreateMat C API with cv::Mat)
	Mat mask = Mat::zeros(sourceImg.rows, sourceImg.cols, CV_8UC1);

	vector<Point> ROI_Poly;
	approxPolyDP(ROI, ROI_Poly, 1.0, true);

	for (int i = 0; i < ROI.size(); i++){
		if (ROI[i].x > max_x){
			max_x = ROI[i].x;}
		if (ROI[i].x < min_x){
			min_x = ROI[i].x;}
		if (ROI[i].y > max_y){
			max_y = ROI[i].y;}
		if (ROI[i].y < min_y){
			min_y = ROI[i].y;}
	}
	
	fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0); 
	// Sanity check for outside the edges of the source image
	/*max_x = (max_x > sourceImg.cols) ? sourceImg.cols : max_x;
	max_y = (max_y > sourceImg.rows) ? sourceImg.rows : max_y;
	min_x = (min_x < 0 ) ? 0 : min_x;
	min_y = (min_y < 0 ) ? 0 : min_y;
	
	int width = max_x - min_x;
	int height = max_y - min_y;*/
	
//	int topLeftX = centerX - width  * scaleX / 2;
//	int topLeftY = centerY - height * scaleY / 2;
	
//	cout << topLeftX << " " << topLeftY << endl;
	
//	topLeftX = max(topLeftX, 0);
//	topLeftY = max(topLeftY, 0);
	
	
	// Move mask to the origin
//	translateImg(mask, (-1 * min_x) , (-1 * min_y)  );//, 50);
	
//	cout << mask.size() << endl;
			
//	Mat tmp = sourceImg(Rect(min_x, min_y, width, height) );
	
//	resize(tmp, tmp, Size(width * scaleX, height * scaleY), 0, 0, INTER_CUBIC);
//	resize(mask, mask, Size(mask.cols * scaleX, mask.rows * scaleY), 0, 0, INTER_CUBIC);
	
	// By playing around, it seems that the thing to do is move the mask to the origin, where it is 
	// applied to the dstImg(rectangle). Then that masked image is applied to the rectangle in the 
	// destination image. We don't need to move the mask to line up with the destination image. 
//	tmp.copyTo(dstImg(Rect(topLeftX, topLeftY, width * scaleX, height * scaleY) )  , mask);
}
Example #11
void transformScrabble(Mat img, Mat& img_tr, Rect& big_rect)
{
	// pre-process the input image
	Mat img_contours = processInput(img);

	 // find all the contours
	 vector<vector<Point> > contours;
	 vector<Vec4i> hierarchy;

	 findContours(img_contours, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

	 // biggest rectangle
	 int rect_size_max = 0;

	 for(auto i: contours)
	 {
		 vector<Point> contours_poly;

		 // approximate contours to polygons
		 approxPolyDP(Mat(i), contours_poly, 4, true);

		 // only process rectangle polygons
		 if(contours_poly.size() == 4 && isContourConvex(contours_poly))
		 {
			 int rect_size = contourArea(contours_poly, false);

			 // area of the polygon
			 if(rect_size > rect_size_max)
			 {
				 // save the biggest rectangle
				 rect_size_max = rect_size;
				 big_rect = boundingRect(Mat(contours_poly));
			 }
		 }
	 }

	 // size of the output image fitting the OCR matching size
	 Size img_tr_size;
	 img_tr_size.width = case_size * 9;
	 img_tr_size.height = case_size * 9;

	 // grid coordinates
	 Point2f in_c[4], out_c[4];

	 // fitting the size of the biggest rectangle
	 in_c[0] = Point2f(big_rect.x,                  big_rect.y);
	 in_c[1] = Point2f(big_rect.x + big_rect.width, big_rect.y);
	 in_c[2] = Point2f(big_rect.x + big_rect.width, big_rect.y + big_rect.height);
	 in_c[3] = Point2f(big_rect.x,                  big_rect.y + big_rect.height);

	 // fitting the size of the OCR matching size
	 out_c[0] = Point2f(0,                 0);
	 out_c[1] = Point2f(img_tr_size.width, 0);
	 out_c[2] = Point2f(img_tr_size.width, img_tr_size.height);
	 out_c[3] = Point2f(0,                 img_tr_size.height);

	// perspective transform from the input image
	warpPerspective(img, img_tr, getPerspectiveTransform(in_c, out_c), img_tr_size);
}
bool CColor::existColor(Mat imgCanny)
{
    findContours(imgCanny, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    for (size_t i = 0; i< contours.size(); i++)
    {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);// 5, true); //
        if(fabs(contourArea(approx)) > 1000 && approx.size() == 4 ){
            if(! fig.isSquare( approx ))
                return true;
        }
    }
    return false;
}
/* Background detection */
void BackGroundDetection(Mat frame, Mat mask,BackgroundSubtractorMOG2 *pMog)
{
  //copy and resize
  Mat frame_copy = frame.clone(),fore;
  if(BG_scale_factor != 1.0f) 
    resize(frame_copy, frame_copy, Size(), BG_scale_factor, BG_scale_factor, INTER_AREA);
  pMog->operator ()(frame_copy,fore);
  vector<vector<Point> > contours;
  erode(fore,fore,Mat());
  dilate(fore,fore,Mat());
  findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE) ;
  vector<Rect> boundRect( contours.size() );
  vector<vector<Point> > contours_poly( contours.size() );
  vector<Vec4i> hierarchy;
  Mat drawing = Mat::zeros(fore.size(),CV_8UC1);
  // double smallest_area  = contourArea( contours[0],false);
  for( int i = 0; i< contours.size(); i++ )
  { 
    approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
    boundRect[i] = boundingRect( Mat(contours_poly[i]) );
    
    if( (contourArea( contours[i],false) >= 100) 
          && (contourArea( contours[i],false) < video_size.area() * 0.95))
    {
      drawContours( drawing, contours, i, Scalar(255,255,255), CV_FILLED, 8, hierarchy);
    }
    //the min_size and max_size here should be fixed
    // if(boundRect[i].area() >= 100 && boundRect[i].area() < video_size.area() * 0.95)
    // { 
    //   // rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), Scalar(255,255,255), -1, 8, 0); 
    //       drawContours( drawing, contours, i, Scalar(255,255,255), CV_FILLED, 8, hierarchy);    
    // } 
  }
  // imshow("drawing-scale",drawing);
  if(BG_scale_factor != 1.0f)
    resize(drawing, drawing, Size(video_size.width,video_size.height), 0,0, INTER_NEAREST);
  // imshow("drawing-original",drawing);
  for(int i = 0; i < mask.cols; i++)
  {
    for(int j = 0; j < mask.rows; j++)
    {
       Point p = Point(i,j);
        if(drawing.at<uchar>(p) == 255)
           mask.at<uchar>(p) += mask_add_step;
    }
  }
  

}
Example #14
void ImageUtils::findMaskBoundingRectangles(Mat& mask, vector<Rect>& targetsBoundingRectanglesOut) {
	targetsBoundingRectanglesOut.clear();
	
	vector< vector<Point> > contours;
	vector<Vec4i> hierarchy;

	findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

	vector<vector<Point> > contours_poly(contours.size());
	targetsBoundingRectanglesOut.resize(contours.size());	
	int contoursSize = contours.size();

	#pragma omp parallel for schedule(dynamic)
	for (int i = 0; i < contoursSize; ++i) {
		approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
		targetsBoundingRectanglesOut[i] = boundingRect(Mat(contours_poly[i]));
	}
}
Example #15
double FillingRate(Mat src, int size) {
    Mat src_gray;
    cvtColor(src, src_gray, CV_BGR2GRAY);
    blur(src_gray, src_gray, Size(3, 3));

    Mat threshold_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;

    /// Detect edges using Threshold
    threshold(src_gray, threshold_output, 253, 255, THRESH_BINARY);
    //imshow("", threshold_output); waitKey(0);
    /// Find contours
    findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

    /// Approximate contours to polygons + get bounding rects and circles
    vector<vector<Point> > contours_poly(contours.size());
    vector<Rect> boundRect(contours.size());
    vector<Point2f>center(contours.size());
    vector<float>radius(contours.size());

    for (int i = 0; i < contours.size(); i++)
    {
        approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
        minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);
    }

    /// Draw polygonal contour + bonding rects + circles
    Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
    for (int i = 0; i< contours.size(); i++)
    {
        //cout << arcLength(contours[i], true) << endl;
        Scalar color = Scalar(255, 255, 255);
        drawContours(drawing, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point());
        circle(drawing, center[i], (int)radius[i], color, 2, 8, 0);
    }
    size = windowSize*windowSize - countNonZero(threshold_output);
    /// Show in a window
    //namedWindow("Contours", CV_WINDOW_AUTOSIZE); imshow("Contours", drawing); waitKey(0);
    // Note: radius[1] assumes at least two contours were found above
    double fillingRate = double(size) / (double(pow(radius[1], 2))*3.14);
    cout << fillingRate;
    return fillingRate;


}
Example #16
Mat vehicle_det::get_mask(Mat & src)
{
    //cout<<__PRETTY_FUNCTION__<<endl;
    /* ROI by creating mask for the parallelogram */
    Mat mask = Mat(src.rows, src.cols, CV_8UC1);

    // Create black image with the same size as the original
    for(int i = 0; i < mask.cols; i++)
        for(int j = 0; j < mask.rows; j++)
        {
            mask.at<uchar>(Point(i, j)) = 0;
        }

    // Create Polygon from vertices
    vector<Point> ROI_Poly;
    approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, true);
    // Fill polygon white
    fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);
    // Cut out ROI and store it in imageDest
    return mask;
}
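The comment above mentions cutting out the ROI and storing it in imageDest; a minimal sketch of applying the returned mask, assuming 'det' is a vehicle_det instance and 'src' is the frame the mask was built for (both placeholders):
// Hedged usage sketch: keep only the pixels inside the polygon, black elsewhere.
cv::Mat mask = det.get_mask(src);
cv::Mat imageDest = cv::Mat::zeros(src.size(), src.type());
src.copyTo(imageDest, mask); // copyTo with a mask copies only where mask != 0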
Example #17
			bool SegmentProcessor::ExtractBasicSegmentFeatures(SuperPixel& sp, const Mat& cimg, const Mat& dmap)
			{
				if(countNonZero(sp.mask) < 200)
					return false;

				// edge detection
				Mat edgemap;
				cv::Canny(sp.mask*150, edgemap, 100, 200);
				//imshow("edge", edgemap);
				//waitKey(0);

				Contours curves;
				std::vector<cv::Vec4i> hierarchy;
				findContours( edgemap, curves, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE );

				sp.original_contour = curves[0];
				approxPolyDP(sp.original_contour, sp.approx_contour, cv::arcLength(cv::Mat(sp.original_contour), true)*0.02, true);
				sp.area = countNonZero(sp.mask);
				sp.box = boundingRect(sp.approx_contour);
				sp.perimeter = arcLength(curves[0], true);
				sp.isConvex = isContourConvex(sp.approx_contour);
				sp.centroid.x = 0;
				sp.centroid.y = 0;
				for (int r=sp.box.y; r<sp.box.br().y; r++)
				{
					for(int c=sp.box.x; c<sp.box.br().x; c++)
					{
						sp.centroid.x += c*sp.mask.at<uchar>(r,c);
						sp.centroid.y += r*sp.mask.at<uchar>(r,c);
					}
				}
				sp.centroid.x /= sp.area;
				sp.centroid.y /= sp.area;

				sp.meanDepth = mean(dmap, sp.mask).val[0];

				return true;
			}
// WARNING! Must filter terrain out before
bool PixelClassifier::detectBall(Point2f &outputCenter, float &outputRadius) {
    Mat ballTresh;
    getOneClass(ballTresh, BALLE);

    Mat out = sourceImage.clone();

    vector< Point > *contour;
    contour = extractBiggestConnectedComposant(ballTresh, ballTresh);

    outputCenter = Point2f(-1,-1);
    outputRadius = -1;

    bool ballVisible = (contour != NULL);
    if (ballVisible){
        vector<Point> poly;

        approxPolyDP( Mat(*contour), poly, 3, true );
        minEnclosingCircle( (Mat)poly, outputCenter, outputRadius);

        // // for debugging display :
        //circle(ballTresh, outputCenter, (int)outputRadius,Scalar(255,255,255) , 2, 8, 0 );
    }
    return ballVisible;
}
Example #19
/*
 * Detects an octagon (stop sign)
 *      - 8 vertices
 *      - angles are ~135 degrees -> cos(135)~-.70
 * param frame: the image frame to process
 * param dist: pointer to the distance to stop sign
 * return 0: success
 * return 1: error
 */
Mat shapeDetect(Mat frame, float *dist)
{
    if (frame.empty())
    {
       cout<<"bad frame \n";
       return Mat();
    }

    // filter image
    GaussianBlur(frame, frame, Size(7,7), 1.5, 1.5);

    // Convert to binary image using Canny
    Mat bw;
    Canny(frame, bw, 50, 200, 5);

    // increase detected pixels
    dilate(bw, bw, Mat(), Point(-1,-1));

    // Find contours
    vector<vector<Point> > contours;
    findContours(bw.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    // vector approx will contain the vertices of the polygonal approximation for the contour
    vector<Point> approx;

    // Loop through all the contours and get the approximate polygonal curves for each contour
    for (unsigned int i = 0; i < contours.size(); i++)
    {
        // Approximate contour with accuracy proportional to the contour perimeter
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

        // Skip small or non-convex objects
        if (fabs(contourArea(contours[i])) < 200 || !isContourConvex(approx))
            continue;

        // possible octagon
        if (approx.size() == 8)
        {
            // Number of vertices of polygonal curve
            int vtc = approx.size();

            // Get the cosines of all corners
            vector<double> cos;
            for (int j = 2; j < vtc+1; j++)
                cos.push_back(angle(approx[j%vtc], approx[j-2], approx[j-1]));

            // Sort ascending the cosine values
            sort(cos.begin(), cos.end());

            // Get the lowest and the highest cosine
            double mincos = cos.front();
            double maxcos = cos.back();

            // Use the degrees obtained above and the number of vertices to determine the shape of the contour
            // angles are pretty relaxed in case the camera isn't straight on
            if (vtc == 8 && mincos >= -0.85 && maxcos <= -0.55)
            {
                // found an octagon (stop sign) -> calculate distance
                *dist = dist2obj(contours[i]);
                setLabel(bw, "stopsign", contours[i]);
            }
        }
    }
    return bw;
}
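This example (and the square-finding ones further down) relies on an angle() helper that is not shown; it is presumably the cosine helper from OpenCV's squares.cpp sample, sketched below for reference rather than taken from the original source:
// Assumed helper: cosine of the angle between vectors pt0->pt1 and pt0->pt2,
// with a small term in the denominator to avoid division by zero.
static double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2) / std::sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10);
}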
Example #20
void detect2(Mat img, vector<Mat>& regionsOfInterest,vector<Blob>& blobs){
/*	Mat blurred; 
	GaussianBlur(img, blurred, Size(), _SharpSigma, _SharpSigma);
	Mat lowContrastMask = abs(img - blurred) < _SharpThreshold;
	Mat sharpened = img*(1+_SharpAmount) + blurred*(-_SharpAmount);
	img.copyTo(sharpened, lowContrastMask);
	sharpened.copyTo(img);*/
	/************* INITIALIZATIONS **********/
	Mat gray; 
	Mat out = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat masked = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat morph = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat bwmorph = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat cont = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat maskHSV = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat whiteMaskMasked = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat whiteMaskOrig = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat Bands[3];
	Mat noBackMask = Mat::zeros(Size(WIDTH,HEIGH), CV_8U);
	Mat kernelEr = getStructuringElement(MORPH_ELLIPSE,Size(5,5));
	Mat thMasked; Mat thOrig; Mat bwOrig; Mat bwNoBackMask;
	Mat kernelOp = getStructuringElement(MORPH_ELLIPSE,Size(13,13));
	vector<Mat> BGRbands;  split(img,BGRbands);
	vector< vector<Point> > contours;
	/***************************************/
	/*cvtColor(img,gray,CV_BGR2GRAY);
	gray = (gray!=0);
	imshow("gray",gray);*/
	/* Shadow and background removal */
//	masked = applyMaskBandByBand(maskHSV,BGRbands); split(masked,BGRbands);
	
	/* Remove the background and threshold to keep only what is white */
	noBackMask = backgroundRemoval(img);
	masked = applyMaskBandByBand(noBackMask,BGRbands);
/*
	whiteMaskOrig = computeWhiteMaskLight(img);
	whiteMaskOrig = whiteMaskOrig + computeWhiteMaskShadow(img);

	whiteMaskMasked = computeWhiteMaskLight(masked);
	whiteMaskMasked = whiteMaskMasked + computeWhiteMaskShadow(masked);
*/
	CBlobResult blobsRs;
	blobsRs = computeWhiteMaskOtsu(img, img, blobsRs, img.rows*img.cols, img.rows*img.cols, 0.8, 0.8, 30, 200, 0);
	
	//Mat newimg(img.size(),img.type());
    whiteMaskOrig.setTo(0);
    for(int i=0;i<blobsRs.GetNumBlobs();i++){
			 blobsRs.GetBlob(i)->FillBlob(whiteMaskOrig,CV_RGB(255,255,255),0,0,true);
    }

	threshold(masked,whiteMaskMasked,0,255,THRESH_BINARY);
	cvtColor(whiteMaskMasked,whiteMaskMasked,CV_BGR2GRAY);
		cout << whiteMaskMasked.type() << " " << whiteMaskOrig.type() << endl;
	bitwise_or(whiteMaskMasked,whiteMaskOrig,thOrig);
	masked = applyMaskBandByBand(thOrig,BGRbands);
#if DO_MORPH
	/* Morphological operations to fill holes and remove jagged edges */
	dilate(masked,morph,kernelEr);
	erode(morph,morph,kernelEr);
	
	erode(morph,morph,kernelOp);
	dilate(morph,morph,kernelOp);
#else
	morph = masked;
#endif
	/* Find connected components and discard them by area */
	cvtColor(morph,bwmorph,CV_BGR2GRAY);
	findContours(bwmorph, contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
	vector<double> areas = computeArea(contours);
	for(int j = areas.size()-1; j>=0; j--){
		if(areas.at(j)>MAX_AREA || areas.at(j)<MIN_AREA )
			contours.erase(contours.begin()+j);
	}

	/* Compute bounding rectangles from the connected components of interest */
	 vector<Rect> boundRect( contours.size() );
	 vector<vector<Point> > contours_poly( contours.size() );
	 vector<Point2f>center( contours.size() ); 
	 vector<float>radius( contours.size() );
	 /* Build the final image and extract the regions of interest */
	for (int idx = 0; idx < contours.size(); idx++){
		Blob b; b.originalImage = &img;
		Scalar color(255);
		approxPolyDP( Mat(contours[idx]), contours_poly[idx], 3, true );
		boundRect[idx] = boundingRect( Mat(contours_poly[idx]) );
		
		minEnclosingCircle( (Mat)contours_poly[idx], center[idx], radius[idx] );
	//	Rect tmpRect(center[idx].x-boundRect[idx].width/2,center[idx].y-boundRect[idx].height/2,boundRect[idx].width,boundRect[idx].height);
		Rect tmpRect(center[idx].x-radius[idx],center[idx].y-radius[idx],radius[idx]*2,radius[idx]*2);
		//Rect tmpRect = boundRect[idx];
		Rect toPrint; 
		tmpRect += Size(tmpRect.width*RECT_AUGMENT ,tmpRect.height*RECT_AUGMENT);			  // Enlarge the area by RECT_AUGMENT
		tmpRect -= Point((tmpRect.width*RECT_AUGMENT)/2 , (tmpRect.height*RECT_AUGMENT)/2 ); // Re-center the rectangle
		
		drawContours(cont, contours, idx, color, CV_FILLED, 8);
		if(tmpRect.x>0 && tmpRect.y>0 && tmpRect.x+tmpRect.width < morph.cols && tmpRect.y+tmpRect.height < morph.rows){ // if the enlarged rectangle does NOT fall outside the image, accept it
			regionsOfInterest.push_back(masked(tmpRect));
			b.cuttedWithBack = img(tmpRect);
			b.cuttedImages = masked(tmpRect);
			b.blobsImage = cont(tmpRect);
			b.rectangles = tmpRect;
			toPrint = tmpRect;
		}
		else{
			toPrint = boundRect[idx];
			regionsOfInterest.push_back(masked(boundRect[idx]));
			b.cuttedImages = masked(boundRect[idx]);
			b.cuttedWithBack = img(boundRect[idx]);
			b.rectangles = boundRect[idx];
			b.blobsImage = cont(boundRect[idx]);
		}
		Point centroid = computeCentroid(contours[idx]);
		b.centroid = centroid;
		b.area = contourArea(contours[idx]);
		b.distance = HEIGH - centroid.y;
		
		/*rectangle( cont, toPrint.tl(), toPrint.br(), color, 2, 8, 0 );
		circle( cont, center[idx], (int)radius[idx], color, 2, 8, 0 );*/
		blobs.push_back(b);
	}
	
	//out = out+cont;
	bitwise_xor(out,cont,out);
	
	/*imshow("img",img);
	imshow("out",out);
	waitKey(0);*/
}
Example #21
void SquareOcl::find_squares_gpu( const Mat& image, vector<vector<Point> >& squares )
{
    squares.clear();

    Mat gray;
    cv::ocl::oclMat pyr_ocl, timg_ocl, gray0_ocl, gray_ocl;

    // down-scale and upscale the image to filter out the noise
    ocl::pyrDown(ocl::oclMat(image), pyr_ocl);
    ocl::pyrUp(pyr_ocl, timg_ocl);

    vector<vector<Point> > contours;
    vector<cv::ocl::oclMat> gray0s;
    ocl::split(timg_ocl, gray0s); // split 3 channels into a vector of oclMat
    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        gray0_ocl = gray0s[c];
        // try several threshold levels
        for( int l = 0; l < SQUARE_OCL_THRESH_LEVEL_H; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // do canny on OpenCL device
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                cv::ocl::Canny(gray0_ocl, gray_ocl, 0, SQUARE_OCL_EDGE_THRESH_H, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                ocl::dilate(gray_ocl, gray_ocl, Mat(), Point(-1,-1));
                gray = Mat(gray_ocl);
            }
            else
            {
                // apply threshold if l!=0:
                //     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                cv::ocl::threshold(gray0_ocl, gray_ocl, (l+1)*255/SQUARE_OCL_THRESH_LEVEL_H, 255, THRESH_BINARY);
                gray = gray_ocl;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;
            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // square contours should have 4 vertices after approximation
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if( approx.size() == 4 &&
                        fabs(contourArea(Mat(approx))) > 1000 &&
                        isContourConvex(Mat(approx)) )
                {
                    double maxCosine = 0;

                    for( int j = 2; j < 5; j++ )
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degrees) then write quadrangle
                    // vertices to resultant sequence
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}
Example #22
void Target::findLaser(Mat imgInput, Mat *imgOutput)
{
	Mat hsv_img, binary, cont, imgCopy;
	imgCopy = imgInput;
	
	cvtColor(imgCopy, hsv_img, CV_BGR2HSV);

	Mat imgThresholded;
	int iLowH = colorRatio[0];
	int iHighH = colorRatio[1];

	int iLowS = colorRatio[2];
	int iHighS = colorRatio[3];

	int iLowV = colorRatio[4];
	int iHighV = colorRatio[5];
	inRange(hsv_img, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image

	//morphological opening (remove small objects from the foreground)
	erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	//morphological closing (fill small holes in the foreground)
	dilate(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
	erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));

	binary = imgThresholded;
	vector<vector<Point>> contours;
	vector<Point> contours_poly;
	Rect boundRect;
	binary.copyTo(cont);
	findContours(cont, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
	int max = 0, i_cont = -1;
	Mat drawing = Mat::zeros(cont.size(), CV_8UC3);
	for (int i = 0; i< contours.size(); i++)
	{
		if (abs(contourArea(Mat(contours[i]))) > max)
		{
			max = abs(contourArea(Mat(contours[i])));
			i_cont = i;
		}
	}
	if (i_cont >= 0)
	{
		approxPolyDP(Mat(contours[i_cont]), contours_poly, 3, true);
		boundRect = boundingRect(Mat(contours_poly));
		fillConvexPoly(imgCopy, contours_poly, contours_poly.size());
		rectangle(imgCopy, boundRect.tl(), boundRect.br(), Scalar(125, 250, 125), 2, 8, 0);
		line(imgCopy, boundRect.tl(), boundRect.br(), Scalar(250, 125, 125), 2, 8, 0);
		line(imgCopy, Point(boundRect.x + boundRect.width, boundRect.y), Point(boundRect.x, boundRect.y + boundRect.height), Scalar(250, 125, 125), 2, 8, 0);
		string s;
		stringstream out;
		out << boundRect.x + boundRect.width / 2 << "x" << boundRect.y + boundRect.height / 2;
		Point contour_center;
		contour_center.x = boundRect.x + boundRect.width / 2;
		contour_center.y = boundRect.y + boundRect.height / 2;
		*laser = contour_center;
		s = out.str();
		putText(imgCopy, s, Point(50, 50), CV_FONT_HERSHEY_COMPLEX, 1, Scalar(20, 40, 80), 3, 8);
		drawContours(drawing, contours, i_cont, Scalar(125, 125, 250), 2);
	}
	*imgOutput = imgCopy;
	//imshow("kolory", binary);
	
}
Example #23
int shapeDetection(Mat src, int size) {

    // Do convex hull refinement
    vector<Point> hull = convexHullExtraction(src);

    ///*
    std::vector<Point> approx;
    Mat dst = src.clone();
    int shape = -1;

    // Approximate contour with accuracy proportional to the contour perimeter
    approxPolyDP(Mat(hull), approx, arcLength(Mat(hull), true)*0.3, true);

    //cout << approx.size() << endl;
    if (approx.size() == 4) {
        // Number of vertices of polygonal curve
        int vtc = approx.size();

        // Get the cosines of all corners
        std::vector<double> cos;
        for (int j = 2; j < vtc + 1; j++)
            cos.push_back(angle(approx[j%vtc], approx[j - 2], approx[j - 1]));

        // Sort ascending the cosine values
        std::sort(cos.begin(), cos.end());

        // Get the lowest and the highest cosine
        double mincos = cos.front();
        double maxcos = cos.back();

        // Use the degrees obtained above and the number of vertices
        // to determine the shape of the contour
        if (vtc == 4 && mincos >= -0.1 && maxcos <= 0.3) {
            setLabel(dst, "RECT", hull);
            shape = 4;
        }
        else if (vtc == 5 && mincos >= -0.34 && maxcos <= -0.27) {
            setLabel(dst, "PENTA", hull);
            shape = 5;
        }
        else if (vtc == 6 && mincos >= -0.55 && maxcos <= -0.45) {
            setLabel(dst, "HEXA", hull);
            shape = 6;
        }
    }
    else {
        // Detect and label circles
        double fillrate = FillingRate(src,size);// , hull);
        if (fillrate > 0.89) {
            setLabel(dst, "CIR", hull);
            shape = 0;
        }
        else if (fillrate < 0.78 && fillrate > 0.7) {
            setLabel(dst, "HEART", hull);
            shape = 2;
        }
        else if (fillrate > 0.78 && fillrate < 0.89) {
            setLabel(dst, "FLOWER", hull);
            shape = 5;
        }
        else {
            setLabel(dst, "Rect", hull);
            shape = 4;
        }

    }
    if (recogintionShow) {
        //imshow("src", src);
        imshow("dst", dst);
        if (save)
            imwrite("Result.jpg", dst);
        waitKey(0);
    }
    return shape;
    //*/
}
Example #24
  // Tries to find a rectangular area surrounding most of the characters.  Not required
  // but helpful when determining the plate edges
  void PlateMask::findOuterBoxMask( vector<TextContours > contours )
  {
    double min_parent_area = pipeline_data->config->templateHeightPx * pipeline_data->config->templateWidthPx * 0.10;	// Needs to be at least 10% of the plate area to be considered.

    int winningIndex = -1;
    int winningParentId = -1;
    int bestCharCount = 0;
    double lowestArea = 99999999999999;

    if (pipeline_data->config->debugCharAnalysis)
      cout << "CharacterAnalysis::findOuterBoxMask" << endl;

    for (unsigned int imgIndex = 0; imgIndex < contours.size(); imgIndex++)
    {
      //vector<bool> charContours = filter(thresholds[imgIndex], allContours[imgIndex], allHierarchy[imgIndex]);

      int charsRecognized = 0;
      int parentId = -1;
      bool hasParent = false;
      for (unsigned int i = 0; i < contours[imgIndex].goodIndices.size(); i++)
      {
        if (contours[imgIndex].goodIndices[i]) charsRecognized++;
        if (contours[imgIndex].goodIndices[i] && contours[imgIndex].hierarchy[i][3] != -1)
        {
          parentId = contours[imgIndex].hierarchy[i][3];
          hasParent = true;
        }
      }

      if (charsRecognized == 0)
        continue;

      if (hasParent)
      {
        double boxArea = contourArea(contours[imgIndex].contours[parentId]);
        if (boxArea < min_parent_area)
          continue;

        if ((charsRecognized > bestCharCount) ||
            (charsRecognized == bestCharCount && boxArea < lowestArea))
          //(boxArea < lowestArea)
        {
          bestCharCount = charsRecognized;
          winningIndex = imgIndex;
          winningParentId = parentId;
          lowestArea = boxArea;
        }
      }
    }

    if (pipeline_data->config->debugCharAnalysis)
      cout << "Winning image index (findOuterBoxMask) is: " << winningIndex << endl;

    if (winningIndex != -1 && bestCharCount >= 3)
    {

      Mat mask = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

      // get rid of the outline by drawing a 1 pixel width black line
      drawContours(mask, contours[winningIndex].contours,
                   winningParentId, // draw this contour
                   cv::Scalar(255,255,255), // in
                   FILLED,
                   8,
                   contours[winningIndex].hierarchy,
                   0
                  );

      // Morph Open the mask to get rid of any little connectors to non-plate portions
      int morph_elem  = 2;
      int morph_size = 3;
      Mat element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );

      //morphologyEx( mask, mask, MORPH_CLOSE, element );
      morphologyEx( mask, mask, MORPH_OPEN, element );

      //morph_size = 1;
      //element = getStructuringElement( morph_elem, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
      //dilate(mask, mask, element);

      // Drawing the edge black effectively erodes the image.  This may clip off some extra junk from the edges.
      // We'll want to do the contour again and find the largest one so that we remove the clipped portion.

      vector<vector<Point> > contoursSecondRound;

      findContours(mask, contoursSecondRound, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
      int biggestContourIndex = -1;
      double largestArea = 0;
      for (unsigned int c = 0; c < contoursSecondRound.size(); c++)
      {
        double area = contourArea(contoursSecondRound[c]);
        if (area > largestArea)
        {
          biggestContourIndex = c;
          largestArea = area;
        }
      }

      if (biggestContourIndex != -1)
      {
        mask = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

        vector<Point> smoothedMaskPoints;
        approxPolyDP(contoursSecondRound[biggestContourIndex], smoothedMaskPoints, 2, true);

        vector<vector<Point> > tempvec;
        tempvec.push_back(smoothedMaskPoints);
        //fillPoly(mask, smoothedMaskPoints.data(), smoothedMaskPoints, Scalar(255,255,255));
        drawContours(mask, tempvec,
                     0, // draw this contour
                     cv::Scalar(255,255,255), // in
                     FILLED,
                     8,
                     contours[winningIndex].hierarchy,
                     0
                    );
      }

      if (pipeline_data->config->debugCharAnalysis)
      {
        vector<Mat> debugImgs;
        Mat debugImgMasked = Mat::zeros(pipeline_data->thresholds[winningIndex].size(), CV_8U);

        pipeline_data->thresholds[winningIndex].copyTo(debugImgMasked, mask);

        debugImgs.push_back(mask);
        debugImgs.push_back(pipeline_data->thresholds[winningIndex]);
        debugImgs.push_back(debugImgMasked);

        Mat dashboard = drawImageDashboard(debugImgs, CV_8U, 1);
        displayImage(pipeline_data->config, "Winning outer box", dashboard);
      }

      hasPlateMask = true;
      this->plateMask = mask;
	} else {
	  hasPlateMask = false;
	  Mat fullMask = Mat::zeros(pipeline_data->thresholds[0].size(), CV_8U);
	  bitwise_not(fullMask, fullMask);
	  this->plateMask = fullMask;
	}
  }
Example #25
unsigned test(CClassifier * cl, CCaptcha * captcha)
{
	char captcha_predict[nic];
	unsigned u, v, max_area_ind = 0;
	double area, max_area;
	Mat img = (* captcha)();
	Size size = img.size();
	Mat kernel(3, 3, CV_64FC1);
	Mat blr, median, filter, lpl, sum, temp, nimg, thr;
	vector<Mat> ch;
	vector<vector<Point> > cnt;

	kernel.at<double>(0, 0) = -0.1;
	kernel.at<double>(0, 1) = -0.1;
	kernel.at<double>(0, 2) = -0.1;
	kernel.at<double>(1, 0) = -0.1;
	kernel.at<double>(1, 1) = 2;
	kernel.at<double>(1, 2) = -0.1;
	kernel.at<double>(2, 0) = -0.1;
	kernel.at<double>(2, 1) = -0.1;
	kernel.at<double>(2, 2) = -0.1;

	medianBlur(img, median, 5);
	filter2D(median, filter, -1, kernel);
	Laplacian(filter, lpl, CV_32F, 5);
	threshold(lpl, thr, 150, 255, THRESH_BINARY);

	split(thr, ch);
	add(ch[0], ch[1], temp, noArray());
	add(temp, ch[2], sum, noArray(), CV_8U);

	for(u = 0; u < nic; u++)
	{
		try
		{
			Mat nimg = sum(Range(0, size.height), Range(u * size.width / nic, (u + 1) * size.width / nic)).clone();
			Mat tnimg = nimg.clone();
			temp = nimg.clone();
			Mat vc = vec(nimg);

			captcha_predict[u] = captcha->alphabet(cl->predict(vc));

			printf("%c\n", captcha_predict[u]);

			Mat cnt_img(size.height, size.width / nic, CV_8UC1);
			findContours(temp, cnt, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_TC89_KCOS);

			for(v = 0, max_area = 0; v < cnt.size(); v++)
			{
				area = contourArea(cnt[v]);

				if(area > max_area)
				{
					max_area = area;
					max_area_ind = v;
				}
			}

			vector<vector<Point> > approx_cnt;
			approx_cnt.push_back(vector<Point>());
			approxPolyDP(cnt[max_area_ind], approx_cnt[0], 2, true);

			rectangle(cnt_img, Point(0, 0), Point(size.width, size.height), Scalar::all(0), CV_FILLED);
			drawContours(cnt_img, approx_cnt, -1, Scalar::all(255));

			namedWindow("img", CV_WINDOW_NORMAL);
			namedWindow("nimg", CV_WINDOW_NORMAL);
			namedWindow("median", CV_WINDOW_NORMAL);
			namedWindow("filter", CV_WINDOW_NORMAL);
			namedWindow("laplacian", CV_WINDOW_NORMAL);
			namedWindow("thres", CV_WINDOW_NORMAL);
			namedWindow("sum", CV_WINDOW_NORMAL);
			namedWindow("cnt_img", CV_WINDOW_NORMAL);

			imshow("img", img);
			imshow("nimg", tnimg);
			imshow("median", median);
			imshow("filter", filter);
			imshow("laplacian", lpl);
			imshow("thres", thr);
			imshow("sum", sum);
			imshow("cnt_img", cnt_img);

			waitKey();

			destroyAllWindows();
		}
		catch(...)
		{
			;
		}
	}

	return captcha->check(captcha_predict);
}
Example #26
bool Num_Extract::validate (Mat mask, Mat pre){
    std::vector<std::vector<cv::Point> > contour;
    Mat img;
    bool validate = false;
    bool validate1 = false;
    bool big = false;
    Canny(mask,img,0,256,5);
    vector<Vec4i> hierarchy;
    //find contours from post color detection
    cv::findContours(img, contour, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    for(int i = 0 ; i<contour.size();i++){
        if(contourArea( contour[i],false)>0.5*320*240)big = true;// If too close to object
	}
    int count = 0;

    for(int i = 0 ; i<contour.size();i++){
        if(contourArea( contour[i],false)>1000) count++;
	}

    if(count == 0 )return validate;//filter out random noise
    Mat grey,grey0,grey1,grey2,grey3;
    vector<Mat> bgr_planes;
    split(pre,bgr_planes);

    std::vector<std::vector<cv::Point> > contour1;
    std::vector<cv::Point> inner;
    double area = 0;
    vector<int> valid_index;
    vector<int> valid_test, bins_indices;

    for(int i = 0 ; i < contour.size(); i++){
        if(contourArea(contour[i], false) > 1000){
            area = area + contourArea(contour[i], false);
            valid_test.push_back(i);
            for(int j = 0; j < contour[i].size(); j++){
                inner.push_back(contour[i][j]);
            }
        }
    }
    RotatedRect inrect = minAreaRect(Mat(inner)); // bounding rectangle of the bins (if detected)
    RotatedRect outrect;
    double thresh = 0;
    double threshf;


    vector<int> count1;
    int count2 = 0;
    vector<Point> poly;
    if(!big){
        while(thresh < 1000 && (!validate && !validate1)){
            Canny(bgr_planes[0],grey1,0,thresh,5);//multi level canny thresholding
            Canny(bgr_planes[1],grey2,0,thresh,5);
            Canny(bgr_planes[2],grey3,0,thresh,5);
            max(grey1,grey2,grey1);
            max(grey1,grey3,grey);//getting strongest edges
            dilate(grey , grey0 , Mat() , Point(-1,-1));
            grey = grey0;
            cv::findContours(grey, contour1,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
            for(int i = 0;i < contour1.size();i++){
                if(hierarchy[i][3]==-1){
                    continue;//excluding the outermost contour (contour due to the mask)
                }
                if(contourArea(contour1[i],false)>area){
                    outrect = minAreaRect(Mat(contour1[i]));//bounding rectangle of detected contour
                    if(A_encloses_B(outrect,inrect)){
                        valid_index.push_back(i);
                    }
                }
                count2 = 0;
                approxPolyDP(Mat(contour1[i]),poly,3,true);
                if(contourArea(contour1[i],false)>1500){
                    for(int j = 0 ; j < valid_test.size(); j++){
                        RotatedRect test = minAreaRect(Mat(contour[valid_test[j]]));
                        double area1 = contourArea(contour1[i],false);
                        double area2 = contourArea(contour[valid_test[j]],false);
                        if(pointPolygonTest(Mat(poly),test.center,false)>0 && area1>area2){
                            count2++;
                        }
                    }
                }

                count1.push_back(count2);
                poly.clear();
            }
            bool val = false;
            for(int i = 0 ; i < count1.size(); i++){
                if(count1[i]>=1 && val){
                    validate1 = true ;
                    break;
                }
                if(count1[i]>=1){
                    val = true;
                }
            }


            if(valid_index.size()>=1){
                validate = true;
                threshf = thresh;
            }
            thresh = thresh + 1000/11;
            valid_index.clear();
        }
    }
    else{
        validate = true;
    }
    if(validate || validate1){

        return true;
    }
    return validate;
}
bool cv::find4QuadCornerSubpix(InputArray _img, InputOutputArray _corners, Size region_size)
{
    CV_INSTRUMENT_REGION();

    Mat img = _img.getMat(), cornersM = _corners.getMat();
    int ncorners = cornersM.checkVector(2, CV_32F);
    CV_Assert( ncorners >= 0 );
    Point2f* corners = cornersM.ptr<Point2f>();
    const int nbins = 256;
    float ranges[] = {0, 256};
    const float* _ranges = ranges;
    Mat hist;

    Mat black_comp, white_comp;
    for(int i = 0; i < ncorners; i++)
    {
        int channels = 0;
        Rect roi(cvRound(corners[i].x - region_size.width), cvRound(corners[i].y - region_size.height),
            region_size.width*2 + 1, region_size.height*2 + 1);
        Mat img_roi = img(roi);
        calcHist(&img_roi, 1, &channels, Mat(), hist, 1, &nbins, &_ranges);

        int black_thresh = 0, white_thresh = 0;
        segment_hist_max(hist, black_thresh, white_thresh);

        threshold(img, black_comp, black_thresh, 255.0, THRESH_BINARY_INV);
        threshold(img, white_comp, white_thresh, 255.0, THRESH_BINARY);

        const int erode_count = 1;
        erode(black_comp, black_comp, Mat(), Point(-1, -1), erode_count);
        erode(white_comp, white_comp, Mat(), Point(-1, -1), erode_count);

        std::vector<std::vector<Point> > white_contours, black_contours;
        findContours(black_comp, black_contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
        findContours(white_comp, white_contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

        if(black_contours.size() < 5 || white_contours.size() < 5) continue;

        // find two white and black blobs that are close to the input point
        std::vector<std::pair<int, float> > white_order, black_order;
        orderContours(black_contours, corners[i], black_order);
        orderContours(white_contours, corners[i], white_order);

        const float max_dist = 10.0f;
        if(black_order[0].second > max_dist || black_order[1].second > max_dist ||
           white_order[0].second > max_dist || white_order[1].second > max_dist)
        {
            continue; // there will be no improvement in this corner position
        }

        const std::vector<Point>* quads[4] = {&black_contours[black_order[0].first], &black_contours[black_order[1].first],
                                         &white_contours[white_order[0].first], &white_contours[white_order[1].first]};
        std::vector<Point2f> quads_approx[4];
        Point2f quad_corners[4];
        for(int k = 0; k < 4; k++)
        {
            std::vector<Point2f> temp;
            for(size_t j = 0; j < quads[k]->size(); j++) temp.push_back((*quads[k])[j]);
            approxPolyDP(Mat(temp), quads_approx[k], 0.5, true);

            findCorner(quads_approx[k], corners[i], quad_corners[k]);
            quad_corners[k] += Point2f(0.5f, 0.5f);
        }

        // cross two lines
        Point2f origin1 = quad_corners[0];
        Point2f dir1 = quad_corners[1] - quad_corners[0];
        Point2f origin2 = quad_corners[2];
        Point2f dir2 = quad_corners[3] - quad_corners[2];
        double angle = acos(dir1.dot(dir2)/(norm(dir1)*norm(dir2)));
        if(cvIsNaN(angle) || cvIsInf(angle) || angle < 0.5 || angle > CV_PI - 0.5) continue;

        findLinesCrossPoint(origin1, dir1, origin2, dir2, corners[i]);
    }

    return true;
}
Example #28
void IITkgp_functions::ProcessingBlocks::LabelIndividualPB(void)
{
    Continue con;
    SelectLabel sl;
    int k =0;
    int p = 0;
    for(int i=0;i<contours.size();i++)
    {
        if(hierarchy[i][3] == -1 && validblock[i] == true)
        {


            if(LabelFlag[k] == false)  // If a Block is not labelled
            {
                p = p + 1;
                int sp;
                char *name;
                name = (char *) malloc(2000 * sizeof(char));
                sp = sprintf(name,"Processing Block%d",k);
                tempname = name;
                namedWindow(name, CV_WINDOW_KEEPRATIO);
                Mat Temp;
                Temp = Blocks[k];
                imshow(name, Temp);

                sl.setModal(true);  // select Label
                sl.exec();

                PBLabel[k] = selectedlabel;
                LabelFlag[k] = true;
                UnlabelledPB = UnlabelledPB - 1;
                LabelCount[selectedlabel] = LabelCount[selectedlabel] + 1;

                // Now Give Color To the Labelled PB

                vector<Point> contours_poly;
                Rect BoundRect;
                approxPolyDP( Mat(contours[i]), contours_poly, 3, true );
                BoundRect = boundingRect( Mat(contours_poly) );

                for(int m=BoundRect.y;m<BoundRect.y+BoundRect.height;m++)
                {

                    for(int n=BoundRect.x;n<BoundRect.x+BoundRect.width;n++)
                    {
                        bool measure_dist = false; // only an inside/outside test is needed, not the signed distance
                        if((pointPolygonTest(contours_poly,Point(n,m),measure_dist) > 0.0) && src_binary.data[m*src_binary.cols+n]==0)
                        {
                            LabelImages[selectedlabel].data[m*ColorLabelImage.cols+n] = 0;
                            LabelImageInOne.data[m*ColorLabelImage.cols+n] = selectedlabel;
                            ColorLabelImage.data[(m*ColorLabelImage.cols+n)*3+0] = LabelColor[selectedlabel][0];
                            ColorLabelImage.data[(m*ColorLabelImage.cols+n)*3+1] = LabelColor[selectedlabel][1];
                            ColorLabelImage.data[(m*ColorLabelImage.cols+n)*3+2] = LabelColor[selectedlabel][2];
                        }
                    }
                }

                con.setModal(true); // Continue labeling Individually
                con.exec();

                if(!cflag) // do not want to continue labeling individual blocks
                    break;


                destroyWindow(name);
                Temp.release();

             }



            k++;
        }
    }





}
Example #29
int main(int argc, char **argv)
{
  int res;

  try {
    socket.bind ("tcp://*:14444");
    s_sendmore (socket, "event");
    s_send (socket, "{type:\"up\"}");
  }
  catch (const zmq::error_t &e) {
    cerr << "Cannot bind to socket: " << e.what() << endl;
    return -1;
  }

  //  printf("Kinect camera test\n");
  //
  //  int i;
  //  for (i=0; i<2048; i++) {
  //    float v = i/2048.0;
  //    v = powf(v, 3)* 6;
  //    t_gamma[i] = v*6*256;
  //  }
  //
  //  g_argc = argc;
  //  g_argv = argv;
  //
  //  //setup Freenect...
  //  if (freenect_init(&f_ctx, NULL) < 0) {
  //    printf("freenect_init() failed\n");
  //    return 1;
  //  }
  //
  //  freenect_set_log_level(f_ctx, FREENECT_LOG_ERROR);
  //
  //  int nr_devices = freenect_num_devices (f_ctx);
  //  printf ("Number of devices found: %d\n", nr_devices);
  //
  //  int user_device_number = 0;
  //  if (argc > 1)
  //    user_device_number = atoi(argv[1]);
  //
  //  if (nr_devices < 1)
  //    return 1;
  //
  //  if (freenect_open_device(f_ctx, &f_dev, user_device_number) < 0) {
  //    printf("Could not open device\n");
  //    return 1;
  //  }
  //
  //  freenect_set_tilt_degs(f_dev,freenect_angle);
  //  freenect_set_led(f_dev,LED_RED);
  //  freenect_set_depth_callback(f_dev, depth_cb);
  //  freenect_set_video_callback(f_dev, rgb_cb);
  //  freenect_set_video_format(f_dev, FREENECT_VIDEO_RGB);
  //  freenect_set_depth_format(f_dev, FREENECT_DEPTH_11BIT);
  //
  //  freenect_start_depth(f_dev);
  //  freenect_start_video(f_dev);

  initFreenect();

  //start the freenect thread to poll for events
  res = pthread_create(&ocv_thread, NULL, freenect_threadfunc, NULL);
  if (res) {
    printf("pthread_create failed\n");
    return 1;
  }

  Mat depthf;

  Mat frameMat(rgbMat);
  Mat blobMaskOutput(frameMat.size(),CV_8UC1),
  outC(frameMat.size(),CV_8UC3);
  Mat prevImg(frameMat.size(),CV_8UC1),
  nextImg(frameMat.size(),CV_8UC1),
  prevDepth(depthMat.size(),CV_8UC1);
  vector<Point2f> prevPts,nextPts;
  vector<uchar> statusv;
  vector<float> errv;
  Rect cursor(frameMat.cols/2,frameMat.rows/2,10,10);
  bool update_bg_model = true;
  int fr = 1;
  int register_ctr = 0,register_secondbloc_ctr = 0;
  bool registered = false;

  Point2i appear(-1,-1); double appearTS = -1;

  Point2i midBlob(-1,-1);
  Point2i lastMove(-1,-1);

  int hcr_ctr = -1;
  vector<int> hc_stack(20); int hc_stack_ptr = 0;

  while (!die) {
    fr++;

    //    imshow("rgb", rgbMat);
    pthread_mutex_lock(&buf_mutex);

    //Linear interpolation
    {
      Mat _tmp = (depthMat - 400.0);          //minimum observed value is ~440. so shift a bit
      _tmp.setTo(Scalar(2048), depthMat > ((!registered) ? 700.0 : 750.0));   //cut off at 700 (750 once registered) to create a "box" where the user interacts
      _tmp.convertTo(depthf, CV_8UC1, 255.0/1648.0);  //values are 0-2048 (11bit), account for -400 = 1648
    }
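    // Arithmetic behind the mapping above: depthMat holds 11-bit values (0..2047).
    // After the -400 shift, a near reading of ~440 becomes ~40 and scales to
    // 40*255/1648 ≈ 6 (almost black), while anything past the 700/750 cutoff is
    // forced to 2048 and scales to 2048*255/1648 ≈ 317, which saturates to 255
    // (pure white). That is why "depthf < 255" below is treated as real depth.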

    {
      Mat _tmp;
      depthMat.convertTo(_tmp, CV_8UC1, 255.0/2048.0);
      cvtColor(_tmp, outC, CV_GRAY2BGR);
    }

    pthread_mutex_unlock(&buf_mutex);

    //    { //saving the frames to files for debug
    //      stringstream ss; ss << "depth_"<<fr<<".png";
    //      imwrite(ss.str(), depthf);
    //    }

    //Logarithm interpolation - try it!, It should be more "sensitive" for closer depths
    //    {
    //      Mat tmp,tmp1;
    //      depthMat.convertTo(tmp, CV_32FC1);
    //      log(tmp,tmp1);
    //      tmp1.convertTo(depthf, CV_8UC1, 255.0/7.6246189861593985);
    //    }
    //    imshow("depth",depthf);


    Mat blobMaskInput = depthf < 255; //anything not white is "real" depth
    vector<Point> ctr,ctr2;


    Scalar blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob); //find contours in the foreground, choose biggest
    imshow("first", blobMaskOutput);
    /////// blb :
    //blb[0] = x, blb[1] = y, blb[2] = 1st blob size, blb[3] = 2nd blob size.

    //    uint mode_counters[3] = {0};

    if(blb[0]>=0 && blb[2] > 500) { //1st blob detected, and is big enough
      //cvtColor(depthf, outC, CV_GRAY2BGR);

      //closest point to the camera
      Point minLoc; double minval,maxval;
      minMaxLoc(depthf, &minval, &maxval, &minLoc, NULL, blobMaskInput);
      circle(outC, minLoc, 5, Scalar(0,255,0), 3);

      Scalar mn,stdv;
      meanStdDev(depthf,mn,stdv,blobMaskInput);

      //cout << "min: " << minval << ", max: " << maxval << ", mean: " << mn[0] << endl;

      blobMaskInput = depthf < (mn[0] + stdv[0]*.5);
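      // Re-threshold at mean + 0.5*sigma of the depth inside the blob: this keeps only
      // the pixels closer than the bulk of the blob (the part of the user nearest to
      // the camera) before running the segmentation a second time.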

      blb = refineSegments(Mat(),blobMaskInput,blobMaskOutput,ctr,ctr2,midBlob);

      imshow("second", blobMaskOutput);

      if(blb[0] >= 0 && blb[2] > 300) {
        //draw contour
        Scalar color(0,0,255);
        for (int idx=0; idx<ctr.size()-1; idx++)
          line(outC, ctr[idx], ctr[idx+1], color, 1);
        line(outC, ctr[ctr.size()-1], ctr[0], color, 1);

        if(ctr2.size() > 0) {
          Scalar color2(255,0,255);
          for (int idx=0; idx<ctr2.size()-1; idx++)
            line(outC, ctr2[idx], ctr2[idx+1], color2, 2);
          line(outC, ctr2[ctr2.size()-1], ctr2[0], color2, 2);
        }

        //draw "major axis"
        //      Vec4f _line;
        Mat curve(ctr);
        //      fitLine(curve, _line, CV_DIST_L2, 0, 0.01, 0.01);
        //      line(outC, Point(blb[0]-_line[0]*70,blb[1]-_line[1]*70),
        //            Point(blb[0]+_line[0]*70,blb[1]+_line[1]*70),
        //            Scalar(255,255,0), 1);

        //blob center
        circle(outC, Point(blb[0],blb[1]), 50, Scalar(255,0,0), 3);


        //      cout << "min depth " << minval << endl;

        register_ctr = MIN((register_ctr + 1),60);

        if(blb[3] > 5000)
          register_secondbloc_ctr = MIN((register_secondbloc_ctr + 1),60);

        if (register_ctr > 30 && !registered) {
          registered = true;
          appear.x = -1;
          update_bg_model = false;
          lastMove.x = blb[0]; lastMove.y = blb[1];

          cout << "blob size " << blb[2] << endl;

          if(register_secondbloc_ctr < 30) {
            if(blb[2] > 10000) {
              cout << "register panner" << endl;
              send_event("Register", "\"mode\":\"openhand\"");
            } else {
              cout << "register pointer" << endl;
              send_event("Register", "\"mode\":\"theforce\"");
            }
          } else {
            cout << "register tab swithcer" << endl;
            send_event("Register", "\"mode\":\"twohands\"");
          }
        }

        if(registered) {
          stringstream ss;
          ss  << "\"x\":"  << (int)floor(blb[0]*100.0/640.0)
            << ",\"y\":" << (int)floor(blb[1]*100.0/480.0)
            << ",\"z\":" << (int)(mn[0] * 2.0);
          //cout << "move: " << ss.str() << endl;
          send_event("Move", ss.str());

          //---------------------- fist detection ---------------------
          //calc laplacian of curve
          vector<Point> approxCurve;  //approximate curve
          approxPolyDP(curve, approxCurve, 10.0, true);
          Mat approxCurveM(approxCurve);

          Mat curve_lap;
          calc_laplacian(approxCurveM, curve_lap);  //calc laplacian

          hcr_ctr = 0;
          for (int i=0; i<approxCurve.size(); i++) {
            double n = norm(((Point2d*)(curve_lap.data))[i]);
            if (n > 10.0) {
              //high curvature point
              circle(outC, approxCurve[i], 3, Scalar(50,155,255), 2);
              hcr_ctr++;
            }
          }

          hc_stack.at(hc_stack_ptr) = hcr_ctr;
          hc_stack_ptr = (hc_stack_ptr + 1) % hc_stack.size();
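          // hc_stack is a fixed-size circular buffer of the last 20 high-curvature
          // counts; the modulo increment overwrites the oldest entry every frame.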

          Scalar _avg = mean(Mat(hc_stack));
          if (abs(_avg[0] - (double)hcr_ctr) > 5.0) { //a big change in curvature = hand fisted/opened?
            cout << "Hand click!" << endl;
            send_event("HandClick", "");
          }

          if (mode_state == MODE_NONE) {

          }

          //        imshow("out",out);
          //doHist(depthf,out);

          { //some debug on screen..
            stringstream ss; ss << "high curve pts " << hcr_ctr << ", avg " << _avg[0];
            putText(outC, ss.str(), Point(50,50), CV_FONT_HERSHEY_PLAIN, 2.0,Scalar(0,0,255), 2);
          }
        } else {
          //not registered, look for gestures
          if(appear.x<0) {
            //first appearence of blob
            appear = midBlob;
            //          update_bg_model = false;
            appearTS = getTickCount();
            cout << "appear ("<<appearTS<<") " << appear.x << "," << appear.y << endl;
          } else {
            //blob was seen before, how much time passed
            double timediff = ((double)getTickCount()-appearTS)/getTickFrequency();
            if (timediff > .2 && timediff < 1.0) {
              //enough time passed from appearence
              line(outC, appear, Point(blb[0],blb[1]), Scalar(0,0,255), 3);
              if (appear.x - blb[0] > 100) {
                cout << "right"<<endl; appear.x = -1;
                send_event("SwipeRight", "");
                update_bg_model = true;
                register_ctr = 0;
              } else if (appear.x - blb[0] < -100) {
                cout << "left" <<endl; appear.x = -1;
                send_event("SwipeLeft", "");
                update_bg_model = true;
                register_ctr = 0;
              } else if (appear.y - blb[1] > 100) {
                cout << "up" << endl; appear.x = -1;
                send_event("SwipeUp", "");
                update_bg_model = true;
                register_ctr = 0;
              } else if (appear.y - blb[1] < -100) {
                cout << "down" << endl; appear.x = -1;
                send_event("SwipeDown", "");
                update_bg_model = true;
                register_ctr = 0;
              }
            }
            if(timediff >= 1.0) {
              cout << "a ghost..."<<endl;
              update_bg_model = true;
              //a second passed from appearence - reset 1st appear
              appear.x = -1;
              appearTS = -1;
              midBlob.x = midBlob.y = -1;
            }
          }
        }
        send_image(outC);
      }
    } else {
      send_image(depthf);
      register_ctr = MAX((register_ctr - 1),0);
      register_secondbloc_ctr = MAX((register_secondbloc_ctr - 1),0);
    }
    imshow("blob",outC);

    if (register_ctr <= 15 && registered) {
      midBlob.x = midBlob.y = -1;
      registered = false;
      mode_state = MODE_NONE;
      update_bg_model = true;
      cout << "unregister" << endl;
      send_event("Unregister", "");
    }

    char k = cvWaitKey(5);
    if (k == 27) break;
    if (k == ' ')
      update_bg_model = !update_bg_model;
    if (k == 's') {
      cout << "send test event" << endl;
      send_event("TestEvent", "");
    }
  }

  printf("-- done!\n");

  pthread_join(ocv_thread, NULL);
  pthread_exit(NULL);
  return 0;
}
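calc_laplacian is referenced in the example above but not included in the snippet. A minimal sketch of a discrete Laplacian over a closed point curve, assuming the output is a CV_64FC2 matrix so that the Point2d* cast above reads sensible values, could be:

#include <opencv2/core.hpp>

// Hypothetical sketch of such a helper: second difference of a closed 2D curve.
// curve: Nx1 CV_32SC2 (a vector<Point> wrapped in a Mat); out: Nx1 CV_64FC2.
static void calc_laplacian_sketch(const cv::Mat &curve, cv::Mat &out)
{
    const int n = curve.rows;
    out.create(n, 1, CV_64FC2);
    for (int i = 0; i < n; ++i) {
        cv::Point prev = curve.at<cv::Point>((i - 1 + n) % n);
        cv::Point cur  = curve.at<cv::Point>(i);
        cv::Point next = curve.at<cv::Point>((i + 1) % n);
        // lap[i] = p[i-1] - 2*p[i] + p[i+1]; a large norm marks a high-curvature point
        out.at<cv::Point2d>(i) = cv::Point2d(prev.x - 2 * cur.x + next.x,
                                             prev.y - 2 * cur.y + next.y);
    }
}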
	bool RingBox::init(const cv::Mat &inp, const pegBox &pegBox)
	{
		assert(pegBox.pegs.size() == 12);
		vector<vector<Point> > contours_ring;
		Mat initial_ring_mask = inp.clone();
		vector<Vec4i> hierarchy_ring;
		vector<Rect> initial_roi;


		findContours(initial_ring_mask, contours_ring, hierarchy_ring, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

		for (int i = (int)contours_ring.size() - 1; i >= 0; i--)
		{
			if (contours_ring[i].size() < 50)
			{
				contours_ring.erase(contours_ring.begin() + i);
			}
		}
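		// Contours with fewer than 50 points are discarded as noise; iterating from
		// the back keeps the remaining indices valid while erasing.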
		unsigned int No_of_ring = contours_ring.size();

		if (No_of_ring == 6)
		{
			vector<vector<Point> > contours_poly(No_of_ring);
			vector<Point2f>center_poly(No_of_ring);
			vector<float>radius_poly(No_of_ring);

			for (unsigned int i = 0; i < No_of_ring; ++i)
			{
				ContourFeature cFeat(contours_ring[i]);
				approxPolyDP(Mat(contours_ring[i]), contours_poly[i], 3, true);
				minEnclosingCircle((Mat)contours_poly[i], center_poly[i], radius_poly[i]);
				initial_roi.push_back(boundingRect(contours_ring[i]));
			}

			const int size_rings = 6;
			rings.clear();
			rings.reserve(size_rings);
			for (int i = 0; i < size_rings; ++i)
			{
				Ring r;
				r.center = center_poly[i];
				r.radius = radius_poly[i];
				r.roi = initial_roi[i];
				r.status = STATIONARY;
				for (size_t p = 0; p < pegBox.pegs.size(); ++p)
				{
					Rect q = pegBox.pegs[p].roi;
					if ((r.roi & q).width > 0)   // ring ROI overlaps this peg ROI
					{
						r.code_pos = pegBox.pegs[p].code;
						rings.push_back(r);
						break;
					}
				}
			}
			if (rings.size() == (size_t)size_rings)
			{
				return true;
			}
			else
			{
				cout << "There is some problem in the ring/peg overlap assignment\n";
				return false;
			}
		}
		else
		{
			return false;
		}
	}
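The ring-to-peg assignment above hinges on cv::Rect's intersection operator: (r.roi & q) yields an empty rectangle (width 0) when the two regions do not overlap. A tiny standalone illustration:

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Rect ringRoi(40, 40, 30, 30);
    cv::Rect pegRoi(60, 50, 25, 25);
    cv::Rect overlap = ringRoi & pegRoi;   // empty (width 0) if they do not intersect
    std::cout << "overlap area: " << overlap.area() << std::endl;  // 10*20 = 200 here
    return 0;
}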