示例#1
0
// Feed one frame into the (file-global) MOG2 background model: updates the
// model with `img`, writes the foreground mask into `fore` and the current
// background estimate into `back`, then erodes the mask once to suppress
// speckle noise.
void background(Mat &img, Mat &fore, Mat &back)
{
  // The subtractor's call operator both updates the model and yields the mask.
  bg(img, fore);
  bg.getBackgroundImage(back);

  // One erosion with the default 3x3 kernel removes isolated noise pixels.
  erode(fore, fore, Mat());
}
示例#2
0
// Ball-tracking main loop: opens the given video file/device, learns a MOG2
// background model frame by frame, extracts foreground contours, and overlays
// the tracked white ball's path plus a user-chosen collision point.
// Relies on external state (presumably members/globals of ballDetect — verify):
// contourCount, white_collide, white_initial, white_position, minval/maxval,
// morphOps(), trackFilteredObject(), onMouseClick(), FRAME_WIDTH/HEIGHT.
// NOTE(review): runs forever (while(1)); capture.read() success is not checked.
void ballDetect :: initDetect(char *videoInput){

    VideoCapture capture;
    Mat src, src_HSV, processed;
    int x=0; int y=0; 

    Mat currentFrame, back, fore;   
    BackgroundSubtractorMOG2 bg;

    std::vector<std::vector<cv::Point> > contours;

    capture.open(videoInput);
    capture.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_WIDTH);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_HEIGHT);

    // int xyz=1;

    while(1){

        // cout<<xyz++<<endl;
        capture.read(src);
        // HSV copy is used below for the color-threshold (inRange) tracking.
        cvtColor(src, src_HSV, COLOR_BGR2HSV);

        // Update the background model, clean the mask, and count blobs.
        bg.operator ()(src, fore);
        bg.getBackgroundImage(back);
        erode(fore, fore, Mat());
        dilate(fore, fore, Mat());
        findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
        // drawContours(src,  contours,  -1,  Scalar(0,  0,  255),  2);
        contourCount=contours.size();

        // (-1,-1) acts as the "not yet specified" sentinel for the collision
        // point; once set, draw it, the initial point, and the line between.
        if(white_collide.x != -1 && white_collide.y!=-1){
            circle(src, white_collide, 2, Scalar(0, 0, 0), 2);
            circle(src, white_initial, 2, Scalar(0, 0, 0), 2);
            line(src, white_initial, white_collide, Scalar(255, 255, 255), 1, CV_AA);
        }

        // Color-threshold the HSV frame and track the filtered object.
        inRange(src_HSV, *minval, *maxval, processed);
        morphOps(processed);
        trackFilteredObject(x, y, processed, src);

        // Draw the ball's recorded trajectory as a polyline.
        for(int i=0;i<(int)white_position.size()-1;++i){
            line(src, white_position[i], white_position[i+1], Scalar(255, 255, 255), 1, CV_AA); 
        }
        // Block on the same frame until the user clicks a collision point
        // (onMouseClick is expected to set white_collide — TODO confirm).
        while(white_collide.x == -1 && white_collide.y==-1){
            setMouseCallback("source", onMouseClick, &src);
            putText(src, "Specify Point", Point(750, 40), 1, 1, Scalar(255, 0, 0), 2);
            imshow("source", src);
            waitKey(5);
        }

        imshow("source", src);
        waitKey(5);
    }
}
int main ()
{
  Mat frame;
  Mat back;
  Mat fore;
  VideoCapture cap1(1);/*to capture from camera*/
  BackgroundSubtractorMOG2 bg;//works on GMM
  bg.set ("nmixtures", 10);
  vector < vector < Point > >contours;
  namedWindow ("Frame");
  int i=0;
	 
  for (;;)
    {
	cap1 >> frame;
    bg.operator()(frame, fore);
    bg.getBackgroundImage (back);
    erode (fore, fore, cv::Mat ());
    erode (fore, fore, cv::Mat ());
    dilate (fore, fore, cv::Mat ());
	dilate (fore, fore, cv::Mat ());
	dilate (fore, fore, cv::Mat ());
	findContours (fore, contours, CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
    drawContours (frame, contours, -1, Scalar (255, 255, 255), 1);
	Scalar color = Scalar(200,200,200);
	int a=0;
	vector<Rect> boundRect( contours.size() );
    for( int i = 0; i < contours.size(); i++ )
	  {
		   boundRect[i] = boundingRect( contours[i] );
	  }
	for( i = 0; i< contours.size(); i++ )
     {
		if(boundRect[i].width>=40 || boundRect[i].height>=40)//eliminates small boxes
			{
				a=a+(boundRect[i].height)*(boundRect[i].width);
			 }
		//  cout<<"Net contour area is "<<a<<"\n";
	    if(a>=int(frame.rows)*int(frame.cols)/2)//change denominator as per convenience
			{
				putText(frame,"Tampering",Point(5,30),FONT_HERSHEY_SIMPLEX,1,Scalar(0,255,255),2);
				cout<<"\a";
			}
	   }
   imshow ("Frame", frame);
   waitKey(10);	 
 }
  return 1;
}
// Run background subtraction over a numbered image sequence.
// Reads frames named inputpath+inputPrefix+NNNNNN.extinput for indices
// [firstFileP..lastfile] stepping by `step`; writes the binary foreground mask
// to maskpath/"/bin" (jpg) and the background estimate to maskpath/"/bg/bg"
// (bmp). firstFileP < 0 means start at frame 1; when lastfile <= 0 the loop
// bound is non-positive and no frames are processed (same as the original).
// Uses project globals: pMOG (background model) and processImages().
void testBS(string inputpath, string maskpath, string inputPrefix, string extinput, int firstFileP, int lastfile, int step)
{
    UtilCpp u;

    // Default to frame 1 when no explicit start index was given.
    int currFrame = (firstFileP >= 0) ? firstFileP : 1;
    int lastFrame = lastfile;

    while( currFrame <= lastFrame )
    {
        // 6 = zero-padding width of the frame number in the file name.
        Mat input = u.getFrame(false, inputpath+inputPrefix, currFrame, 6, extinput);
        Mat resultFg = processImages(input, currFrame);
        string rname = string(maskpath)+"/bin";
        string bgname = string(maskpath)+"/bg/bg";
        u.writeImg(rname, resultFg, currFrame, string("jpg"));
        Mat bg;
        pMOG.getBackgroundImage(bg);   // background kept in the global model
        u.writeImg(bgname, bg, currFrame, string("bmp"));
        input.release();
        currFrame += step;
    }
}
int main(int argc, char *argv[])
{
    Mat frame;
    Mat back;
    Mat fore;
    VideoCapture cap(0);
	int i=0;
    BackgroundSubtractorMOG2 bg;
    bg.set("nmixtures",3);
    bg.set("detectShadows",false);
    namedWindow("Frame");
    namedWindow("Background");
    int backgroundFrame=500;
    for(;;)
    {
        //Get the frame
        cap >> frame;
        //Update the current background model and get the foreground
    if(backgroundFrame>0)
        {bg.operator ()(frame,fore);backgroundFrame--;}
    else
        {bg.operator()(frame,fore,0);}
    //Get background image to display it
    bg.getBackgroundImage(back);
    imshow("Frame",frame);
    imshow("Background",back);
	if(waitKey(10) >= 0) break;
    }
return 0;
}
示例#6
0
/* Simple motion outliner: learns the background with MOG2, cleans the
 * foreground mask with one erode + one dilate, and draws the detected
 * motion contours on the live frame in red.
 *   frame - raw frame
 *   back  - background image (retrieved, not displayed)
 *   fore  - foreground mask
 * Returns 0 on normal exit, -1 when the camera cannot be opened
 * (the original printed the error but then continued into the loop
 * and would process empty frames).
 */
int main(int argc, char *argv[])
{
    Mat frame, back, fore;
    VideoCapture cap(0);        // capture input from the default camera
    if(cap.isOpened() == false){
        cout << " Failed to access video camera.";
        return -1;              // nothing to process without a camera
    }
    BackgroundSubtractorMOG2 bg;
    bg.set("detectShadows", false);
    bg.set("nmixtures", 3);
    bg.set("history", 30);
    // One vector<Point> per detected contour.
    std::vector< std::vector<cv::Point> > contours;
    namedWindow("Frame");

    for( ; ;)
    {
        // Grab a frame from the camera.
        cap >> frame;
        if(frame.empty())
            break;              // camera lost / stream ended
        // Detect foreground objects and write the mask into fore.
        bg.operator()(frame, fore);
        // Refresh the background estimate.
        bg.getBackgroundImage(back);
        // Noise removal: erode kills speckle, dilate restores blob size.
        erode(fore, fore, Mat());
        dilate(fore, fore, Mat());
        // Map mask pixels to contours (outer boundaries only).
        findContours(fore, contours, CV_RETR_EXTERNAL,
                     CV_CHAIN_APPROX_NONE );
        // Draw the detected contours on the frame in red.
        drawContours(frame, contours, -1, Scalar(0, 0, 255), 2);
        imshow("Frame", frame);

        if(waitKey(30) >= 0) break;
    }

    return 0;
}
示例#7
0
int main(int argc, char *argv[])
{
    Mat frame;
    Mat back;
    Mat fore;
    Mat frame_roi;
    Rect roi_rect;
    VideoCapture cap(path);
    BackgroundSubtractorMOG2 bg;
    //bg.set("bShadowDetection", false);
    //bg.bShadowDetection = false;
    
    vector<vector<Point> > contours;
    
    namedWindow("Frame");
    namedWindow("Background");
    
    cap >> frame; 
    Rect rect_roi = getSelected(frame);
    
    
    for(;;)
    {
        cap >> frame;           // capture frame from video
        frame_roi = frame(rect_roi);
        bg.operator ()(frame_roi,fore);
        bg.getBackgroundImage(back);    // get background image
        erode(fore,fore,Mat());
        dilate(fore,fore,Mat());
        
        findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);  // find contours
        
        vector<vector<Point> > contours_poly( contours.size() );
        vector<Rect> boundRect( contours.size() );
        vector<Point2f>center( contours.size() );
        vector<float>radius( contours.size() );
        
        for( int i = 0; i < contours.size(); i++ )
        { 
            approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
            boundRect[i] = boundingRect( Mat(contours_poly[i]) );
            minEnclosingCircle( (Mat)contours_poly[i], center[i], radius[i] );
        }
        
        
        /// Draw polygonal contour + bonding rects + circles
        Mat drawing = Mat::zeros( fore.size(), CV_8UC3 );
        for( int i = 0; i< contours.size(); i++ )
        {
            if(boundRect[i].area() <min_size || boundRect[i].area() >max_size){
                continue;
            }
            Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
            //drawContours( frame, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
            rectangle( frame_roi, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
            circle( frame_roi, center[i], (int)radius[i], color, 2, 8, 0 );
        }

        
        
        /////
        imshow("Frame",frame);
        imshow("Background",fore);
        
        if(waitKey(1) >= 0) break;
    }
    return 0;
}
示例#8
0
文件: test4.cpp 项目: jos-technion/CV
// Hand-detection demo: after ~500 frames of MOG2 background learning the
// program switches to "filter" mode, takes the largest foreground contour as
// the hand, computes its convex hull and convexity defects, and draws finger
// tips. Relies on file globals (verify against the rest of the file):
// red/green/blue_t_min/max, on_trackbar, POINTS, contours, hullI, hullP,
// defects, cIdx, bRect, bRect_height, bRect_width, eleminateDefects(),
// getFingerTips(), detectIfHand(), drawFingerTips().
int main(int argc, char* argv[])
{
	CvCapture* capture = 0;
	Mat frame, frameCopy, image;
	char c;
	vector <Rect> roi;
	VideoCapture cap(0); // open the default camera

	// NOTE(review): failure is only reported; execution continues regardless.
	if (!cap.isOpened())  {
		cout << "Error:" << endl;
	}
		// check if we succeeded
	namedWindow("result");
	// Trackbars for per-channel color thresholds (used by the commented-out
	// inRange-based filtering further below).
	 char TrackbarName[50];
	sprintf( TrackbarName, "Red max %d", red_t_max );
	createTrackbar( TrackbarName, "result", &red_t_max, 70, on_trackbar );
	sprintf( TrackbarName, "Red min %d", red_t_min );
	createTrackbar( TrackbarName, "result", &red_t_min, 70, on_trackbar );

	sprintf( TrackbarName, "Blue max %d", blue_t_max );
	createTrackbar( TrackbarName, "result", &blue_t_max, 70, on_trackbar );
	sprintf( TrackbarName, "Blue min %d", blue_t_min );
		createTrackbar( TrackbarName, "result", &blue_t_min, 70, on_trackbar );

	sprintf( TrackbarName, "Green max %d", green_t_max );
	createTrackbar( TrackbarName, "result", &green_t_max, 70, on_trackbar );
	sprintf( TrackbarName, "Green min %d", green_t_min );
	createTrackbar( TrackbarName, "result", &green_t_min, 70, on_trackbar );

	int count = 0;
	bool filter = false;     // becomes true once the background is learned
	cv::Scalar total_mean[POINTS];
	 namedWindow("Frame");
	 namedWindow("Background");
	 BackgroundSubtractorMOG2 bg;
	 bg.set("nmixtures",3);
	 bg.set("detectShadows",false);
	// namedWindow("FG Mask MOG 2");


	//cap >> frame;

	//namedWindow("filtered");
	 int background = 500;   // frames remaining in the learning phase
	 Mat fore;
	 Mat back;
	for (;;)
	{
		cap >> frame;
		// Learning phase: update the model; afterwards freeze it (rate 0)
		// and enable the hand-detection filter.
		if(background>0)
				{
			bg.operator ()(frame,fore);background--;
				}
				else
				{
					putText(frame,"Ready",Point(10,20),FONT_HERSHEY_PLAIN,1,Scalar(0,0,0));
					filter = true;

					bg.operator()(frame,fore,0);
				}
		bg.getBackgroundImage(back);
		// Clean the foreground mask before contour extraction.
		erode(fore,fore,Mat());
				dilate(fore,fore,Mat());
		imshow("Frame",frame);
		imshow("Background",back);
		imshow("Forground",fore);
		//cout<< background << endl;
		/* pMOG->operator()(frame, fgMaskMOG);
			 pMOG2->operator()(frame, fgMaskMOG2);
		 imshow("Frame", frame);
		  imshow("FG Mask MOG", fgMaskMOG);
		  imshow("FG Mask MOG 2", fgMaskMOG2);*/
	//	flip(frame,frameCopy,1);
		//imshow("result", frame);
	//	imshow("result2", frameCopy);
		if (filter) {
			//Mat dst;


		/*	Scalar lowerBound=Scalar( total_mean[0][0] -red_t_min , total_mean[0][1] -green_t_min, total_mean[0][2] - blue_t_min );
			Scalar upperBound=Scalar( total_mean[0][0] + red_t_max , total_mean[0][1] + green_t_max, total_mean[0][2] + blue_t_max );
			Mat dst;
			inRange(frame, lowerBound, upperBound, dst);
			for(int i =1; i < POINTS; i ++) {
				Scalar lowerBound=Scalar( total_mean[i][0] - red_t_min , total_mean[i][1] - green_t_min, total_mean[i][2] - blue_t_min );
				Scalar upperBound=Scalar( total_mean[i][0] + red_t_max , total_mean[i][1] + green_t_max, total_mean[i][2] + blue_t_max );
				Mat tmp;
				inRange(frame, lowerBound, upperBound, tmp);
				dst+=tmp;
			}
			medianBlur(dst, dst,7);
			imshow("result2", dst);
			pyrUp(dst,dst);

*/
			// Extract contours from the mask and size the hull/defect
			// containers (these are file globals, not locals).
			findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
			hullI=vector<vector<int> >(contours.size());
			hullP=vector<vector<Point> >(contours.size());
			defects=vector<vector<Vec4i> > (contours.size());
			//cout << " We found: " << contours.size() << endl;
			// The largest contour is assumed to be the hand.
			int indexOfBiggestContour = -1;
			int sizeOfBiggestContour = 0;
			for (int i = 0; i < contours.size(); i++){
				if(contours[i].size() > sizeOfBiggestContour){
					sizeOfBiggestContour = contours[i].size();
					indexOfBiggestContour = i;
				}
			}
			// Ignore frames whose largest blob is too small to be a hand.
			if(sizeOfBiggestContour < 300)
				continue;
			cout << "Size: " << sizeOfBiggestContour << endl;
			cIdx = indexOfBiggestContour;
			 bRect = boundingRect(Mat(contours[indexOfBiggestContour]));
			 bRect_height=bRect.height;
			 	bRect_width=bRect.width;
			// Hull as points (for drawing) and as indices (for defects).
			convexHull(Mat(contours[indexOfBiggestContour]),hullP[indexOfBiggestContour],false,true);
			convexHull(Mat(contours[indexOfBiggestContour]),hullI[indexOfBiggestContour],false,false);
			approxPolyDP( Mat(hullP[indexOfBiggestContour]), hullP[indexOfBiggestContour], 18, true );
			// convexityDefects requires a contour with more than 3 points.
			if(contours[indexOfBiggestContour].size()>3 ){

				convexityDefects(contours[indexOfBiggestContour],hullI[indexOfBiggestContour],defects[indexOfBiggestContour]);
			    eleminateDefects(&fore);
			}
			getFingerTips(frame);
			bool isHand=detectIfHand();
			rectangle(frame,bRect,cv::Scalar(200,0,0));
			drawContours(frame,hullP,cIdx,cv::Scalar(200,0,0),2, 8, vector<Vec4i>(), 0, Point());
					if(isHand){
					//	getFingerTips(frame);
						drawFingerTips(frame);
					//	myDrawContours(frame);
					}

		}
		/*Scalar color = Scalar(0, 255, 0);
		roi.push_back(Rect(Point(frame.cols / 3, frame.rows / 6), Point(frame.cols / 3 + 20, frame.rows / 6 + 20)));
		roi.push_back(Rect(Point(frame.cols / 4, frame.rows / 2), Point(frame.cols / 4 + 20, frame.rows / 2 + 20)));
		roi.push_back(Rect(Point(frame.cols / 3, frame.rows / 1.5), Point(frame.cols / 3 + 20, frame.rows / 1.5 + 20)));
		roi.push_back(Rect(Point(frame.cols / 2, frame.rows / 2), Point(frame.cols / 2 + 20, frame.rows / 2 + 20)));
		roi.push_back(Rect(Point(frame.cols / 2.5, frame.rows / 2.5), Point(frame.cols / 2.5 + 20, frame.rows / 2.5 + 20)));
		roi.push_back(Rect(Point(frame.cols / 2, frame.rows / 1.5), Point(frame.cols / 2 + 20, frame.rows / 1.5 + 20)));
		roi.push_back(Rect(Point(frame.cols / 2.5, frame.rows / 1.8), Point(frame.cols / 2.5 + 20, frame.rows / 1.8 + 20)));
		rectangle(frame, roi[0], color, 2);
		rectangle(frame,roi[1] , color, 2);
		rectangle(frame, roi[2], color, 2);
		rectangle(frame, roi[3], color, 2);
		rectangle(frame, roi[4], color, 2);
		rectangle(frame, roi[5], color, 2);
		rectangle(frame, roi[6], color, 2);*/

		std::string win = "majd";
		imshow(win, frame);
		//imshow("faaa",frame);

		/*roi.push_back(My_ROI(Point(m->src.cols / 3, m->src.rows / 6), Point(m->src.cols / 3 + square_len, m->src.rows / 6 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 4, m->src.rows / 2), Point(m->src.cols / 4 + square_len, m->src.rows / 2 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 3, m->src.rows / 1.5), Point(m->src.cols / 3 + square_len, m->src.rows / 1.5 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 2, m->src.rows / 2), Point(m->src.cols / 2 + square_len, m->src.rows / 2 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 2.5, m->src.rows / 2.5), Point(m->src.cols / 2.5 + square_len, m->src.rows / 2.5 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 2, m->src.rows / 1.5), Point(m->src.cols / 2 + square_len, m->src.rows / 1.5 + square_len), m->src));
		roi.push_back(My_ROI(Point(m->src.cols / 2.5, m->src.rows / 1.8), Point(m->src.cols / 2.5 + square_len, m->src.rows / 1.8 + square_len), m->src));
		*/
		// 'q' quits; 'a' enables filter mode manually.
		c = waitKey(10);
		if (c == 'q') {
			return 0;
		}

	//	c = 'a';
		if (c == 'a') {
			//Mat tst = frame(roi[0]);
			//total_mean = cv::mean(tst);;


		   filter = true;


		}
	}
	return 0;
}
示例#9
0
文件: old.cpp 项目: pyaephyoe/gesture
int main(int argc, char* argv[])
{

	VideoCapture capture;
	capture.open(0);
	capture.set(CV_CAP_PROP_FRAME_WIDTH, 1280);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT, 720);

	while (TRUE)
	{
		capture.read(curr_frame);
		alpha.set("nmixtures",3);		
		alpha.operator()(curr_frame, Final, 0.1);
		//alpha.getBackgroundImage(Bg); 
		//Final = curr_frame - ( Bg / 2);
		Mat element = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7));
		erode(Final,Final, element);
		int x=0,y=0,rows=Final.rows,cols=Final.cols,num=0,sumx=0,sumy=0,pixval=0,x1=0,y1=0;
		for(x=0;x<=rows;x++)
		{
			for(y=0;y<=cols;y++)
			{
				pixval=Final.at<unsigned char>(x,y);
				//printf("%d\n",pixval);
				if(pixval>0)
				{
					sumx=sumx+x;
					sumy=sumy+y;
					//printf("%d\n",num);
					num++;

				}
			}
		}
		printf("%d,%d, %d\n",sumx/num,sumy/num, num);
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		findContours(Final, contours, hierarchy, 5, 5, Point(0, 0));

		vector<vector<Point> > contours_poly(contours.size());
		vector<Rect> boundRect(contours.size());
		
		vector<Point2f>center(contours.size());
		vector<float>radius(contours.size());

		for (int i = 0; i < contours.size(); i++)
		{
			approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
			boundRect[i] = boundingRect(Mat(contours_poly[i]));
			minEnclosingCircle((Mat)contours_poly[i], center[i], radius[i]);
		}

				for (int i = 0; i< contours.size(); i++)
		{
					Scalar color;
			drawContours(Final, contours_poly, i, color, 1, 8, vector<Vec4i>(), 0, Point());
			rectangle(Final, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0);
			
		}

		imshow("", Final);
		//imshow("",curr_frame);
		//imshow("", Final);
		if(waitKey(30)>=0) break;
	}
	return 0;
}
示例#10
0
// Parking-zone monitor: MOG2 foreground blobs larger than a threshold are
// boxed, their distance to a user-drawn parking polygon is reported, and an
// optional CamShift tracker follows them. Uses file globals: parking, vmin,
// vmax, smin, onMouse, drawPaking, trackObject, backProjMode, traceParking,
// and the CamShiftTracker class.
// Fix over the original: dist2Center was printed uninitialized (UB) whenever
// the parking polygon had fewer than 4 points; it now starts at 0.0.
int main(int argc, char *argv[])
{
    RNG rng(12345);

    Mat frame;
    Mat back;               // background estimate
    Mat fore;               // foreground mask
    VideoCapture cap(0);

    const int nmixtures = 3;
    const bool bShadowDetection = false;
    const int history = 4;
    double dist2Center = 0.0;   // signed distance of blob center to the zone
    BackgroundSubtractorMOG2 bg (history,nmixtures,bShadowDetection);

    vector<vector<Point> > contours;

    namedWindow("Frame");
    namedWindow("Background");
    setMouseCallback( "Frame", onMouse, 0 );   // collects parking polygon points
    //createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );
    createTrackbar( "Vmin", "Frame", &vmin, 256, 0 );
    createTrackbar( "Vmax", "Frame", &vmax, 256, 0 );
    createTrackbar( "Smin", "Frame", &smin, 256, 0 );

    for(;;)
    {
        cap >> frame;
        if(frame.empty())
            break;          // camera lost / stream ended
        bg.operator ()(frame,fore);
        bg.getBackgroundImage(back);
        erode(fore,fore,Mat());
        dilate(fore,fore,Mat());
        findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE); // only the extreme outer contours
        // Per-contour geometry buffers.
        vector<vector<Point> > contours_poly( contours.size() );
        vector<Rect> boundRect( contours.size() );
        vector<Point2f>center( contours.size() );
        vector<float>radius( contours.size() );

        for( unsigned int i = 0; i< contours.size(); i++ )
        {
            Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
            approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true);
            boundRect[i] = boundingRect ( Mat(contours_poly[i]) );

            double area = boundRect[i].area();
            if (area > 10000)       // ignore small blobs
            {
                Point2f center(boundRect[i].x + boundRect[i].width/2.0, boundRect[i].y + boundRect[i].height/2.0);

                // Signed distance from the blob center to the parking zone
                // (positive = inside); needs at least 4 polygon points.
                if (parking.size() > 3)
                {
                    dist2Center = pointPolygonTest(parking, center, true);
                }
                cout << center << "is " << dist2Center << " distance from the contour. \n"; 
                putText(frame, "I", center, FONT_HERSHEY_COMPLEX_SMALL, 1.5, color, 1);
                rectangle(frame, boundRect[i].tl(), boundRect[i].br(), Scalar(100, 100, 200), 2, CV_AA);

                // Tracking object with camShift
                if (trackObject) 
                {
                    CamShiftTracker CSTracker;
                    CSTracker.track(frame, boundRect[i], vmin, vmax, smin);

                    if( backProjMode )
                        cvtColor(CSTracker.getBackProjection(), frame, CV_GRAY2BGR );
                    ellipse(frame, CSTracker.getTrackBox(), Scalar(0,0,255), 3, CV_AA );
                }
            }
        }

        /*
         * Draw parking zone
         */
        for (unsigned int j = 0; j < parking.size(); j++)
        {
            circle(frame, parking[j], 5, Scalar(0,0,255), -1);
        }
        drawPaking(frame);

        imshow("Frame",frame);
        imshow("Background",back);

        // Keyboard control: ESC/q quit; s/t/b/c toggle modes or clear zone.
        char c = (char)waitKey(30);
        if( c == 27 || c == 'q')
            break;
        switch(c)
        {
            case 's':
                traceParking = !traceParking; 
                break;
            case 't':
                trackObject = !trackObject;
                break;
            case 'b':
                backProjMode = !backProjMode;
                break;
            case 'c':
                parking.clear();
                break;
            default:
                ;
        }
    }       

    return 0;
}
// 360-camera person tracker: crops a fixed ROI from the input, extracts the
// largest MOG2 foreground contour, converts its centroid to polar angle
// (get_polar / get_centroid are project helpers — verify their contracts),
// and runs a small state machine (state 0 = idle, 1 = target moving) based on
// a 75-sample angle history. Kalman filtering code is present but disabled.
int main(int argc, char *argv[])
{
    Mat frame1;
    Mat frame;
    Mat gray;
    Mat fore;
    Mat biggest_contour;

    Rect roi;

//    CvKalman* kalman = cvCreateKalman( 2, 1, 0 );//state, measurement, control
//    CvRandState rng;
//    cvRandInit(&rng,0,1,-1,CV_RAND_UNI);
//    cvRandSetRange( &rng, 0, 0.1, 0 );
//    rng.disttype = CV_RAND_NORMAL;
    CvMat* kmes = cvCreateMat(1, 1, CV_32FC1);//measurement, just angle
    cvZero(kmes);
    const float F[] = {1,1,0,1};//transition matrix describes model parameters at k and k+1
//    memcpy( kalman->transition_matrix->data.fl, F, sizeof(F));//dest, src, #of bytes
//   cvSetIdentity( kalman->measurement_matrix,    cvRealScalar(1) );//matPtr, value
//    cvSetIdentity( kalman->process_noise_cov,     cvRealScalar(1e-5) );
//    cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) );
//    cvSetIdentity( kalman->error_cov_post,        cvRealScalar(1));
//    cvRand(&rng, kalman->state_post);//choose random initial state
    Scalar red = CV_RGB(255,0,0);
    Scalar blue = CV_RGB(0,0,255);
    Mat dummy;
    int value = 0;

    namedWindow("kalman", CV_WINDOW_AUTOSIZE);
    namedWindow("frame", CV_WINDOW_AUTOSIZE);
    const int FRAME_W = 640;
    const int FRAME_H = 480;

    // 200-frame history, threshold 16, no shadow detection.
    BackgroundSubtractorMOG2 bg = BackgroundSubtractorMOG2(200, 16, false);
    vector<vector<Point> > precontours;
    vector<vector<Point> > contours;
    Point last_center;
    // cpolar: (radius, angle-in-degrees) of the tracked centroid; Point
    // default-constructs to (0,0), used until the first detection.
    Point cpolar;
    Scalar color = Scalar(255,255,255);
    unsigned long frame_count = 0;
    unsigned long largest_size = 0;  //biggest contour
    int contour_size = 0;
    int i = 0, j = 0;
    int larg_contour_index;
    int angl, rad;

    // avrg: circular buffer of the last 75 angles (~5 s of samples);
    // wit: frames spent "waiting" before declaring the target idle/gone.
    int avg, wit = 0, state = 0, avrg[75] , k = 0;
    for ( j=0;j<75;++j ) { avrg[j] = 0; }


    // open the 360 cam, and set it's resolution
    // VideoCapture cap("/Users/riley/Desktop/calit2-1.mov");
    VideoCapture cap(1);
    if(!cap.isOpened())
    {
        cout << endl << "Failed to open video source" << endl << endl;
    }
    else
    {
        cout << endl << "Connected to 360 camera." << endl << endl;
    }

    cap.set(CV_CAP_PROP_FRAME_WIDTH, FRAME_W);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, FRAME_H);

    // tracking loop
    while( 1 )
    {
        cap >> frame1;
        //cout << "Frame number: " <<frame_count << endl;
        // Fixed region of interest within the full 360 image.
        roi = Rect(160, 250, 300, 150);
        frame = frame1(roi);

        // grayscale
        //cvtColor(frame, gray, CV_BGR2GRAY); //grayscale the frame
        //bg.operator()(gray,fore);           //get the binary foreground image

        // color: update the model with a slow learning rate (0.002).
        bg.operator()(frame, fore, 0.002);

        //Perform a morphological close to fuse contours
//        Mat kernel = getStructuringElement(MORPH_RECT, Size(5,5), Point(-1,-1));
        //morphologyEx(fore, fore, MORPH_CLOSE, kernel, Point(-1, -1), 10);
//        int iterations = 5;
//        dilate(fore, fore, kernel, Point(-1,-1), iterations);

        // find all contours, get all the points in each contour
        findContours(fore, precontours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

        // filter small contours: copy the list, then erase entries with
        // fewer than 5 points; j tracks how many were already removed so the
        // erase index stays aligned with the shrinking `contours` vector.
        contours = precontours;
        j = 0;
        for( i = 0; i < precontours.size(); ++i )
        {
            contour_size = precontours[i].size();
            if( contour_size < 5 )
            {
                contours.erase(contours.begin()+i-j);
                ++j;
            }
        }

        // find largest contour
        largest_size = 0;
        larg_contour_index = 0;
        for( i = 0; i < contours.size(); ++i )
        {
            contour_size = contours[i].size();
            if( contour_size > largest_size )
            {
                largest_size = contour_size;
                larg_contour_index = i;
            }
        }

        // needs to be at least one contour for us to choose a new tracking point
        if( contours.size() > 1 )
        {
            // isolate largest contour in its own Mat
            biggest_contour = Mat::zeros(frame.rows, frame.cols, CV_8UC1);
            drawContours( biggest_contour, contours, larg_contour_index, color, -1, 8);
            last_center = get_centroid(biggest_contour);
            cpolar = get_polar (last_center.x, last_center.y, frame.cols);

            //filter based on Kalman's output


            //kalman
          /*  const CvMat *kprd= cvKalmanPredict(kalman,0);//predict pos. kalman, control
            cvSetReal1D(kmes, 0, cpolar.y);
            dummy = cvarrToMat(kprd);
            value = dummy.at<float>(0,0);
            cvKalmanCorrect( kalman, kmes );
            line(frame, Point(frame.cols/2,0), Point(150*cos(cpolar.y*PI/180)+150, 150*sin(cpolar.y*PI/180)), red, 1, 8, 0);
            line(frame, Point(frame.cols/2,0), Point(150*cos(value*PI/180)+150, 150*sin(value*PI/180)), blue, 1, 8, 0);
*/
        //    imshow("kalman", fore);
            imshow("kalman", biggest_contour);
            imshow("frame", frame);
        }
        else
        {
            cout << endl << " no contour";
        }

        //decision making for target state

        // wrap the circular buffer index
        if ( k > 74 )
            k = 0; //movement in the last 5 seconds

        avrg[k] = cpolar.y;
        ++k;
        avg = 0;
        for ( j=0;j<75;++j)
            avg += avrg[j];

        if ( cpolar.y < 5 || cpolar.y > 175 ) //if target is out of view (teta < 5 or teta > 175)
        {
            if( wit > 100 )
                state = 0; //wait about a min before going to 0
            else
              ++wit;
        }
        else //if target is in view (5 < teta < 175)
        {
            // equal to the 75-sample mean => effectively stationary
            if ( cpolar.y == avg/75 ) //if target is not moving for 5 sec
            {
                if ( wit > 100 )
                    state = 0; //wait about a min before going to 0
            else
              ++wit;
            }
            else
            {
              state = 1;
              wit = 0;
            } //if target is moving in the view area record and set the wait to 0
        }

        //cout << endl << " x = " << last_center.x;
        //cout << " y = " << last_center.y;
        //cout << " wait = " << wit;
        //cout << " avg = " << avg/75;
        //cout << endl << " r = " << cpolar.x;
        cout << " (teta,state) = " << "(" << cpolar.y << "," << state << ")" << endl;
        //cout << endl << " kalman out = " << value;
        //cout << endl << " biggest contour size = " << contours[larg_contour_index].size();
        //cout << endl << " xp = " << cos(cpolar.y*PI/180);
        //cout << " yp = " << sin(cpolar.y*PI/180);
        //cout << endl << " cos(v) = " << cos(value*PI/180);
        //cout << " sin(v) = " << sin(value*PI/180);

        //cout <<endl<<endl;
        ++frame_count;
        waitKey(1);
        //if( waitKey(1) >= 0) break;
    }
    return 0;
}
示例#12
0
int main(int argc, char *argv[])
{
	// Webcam "air painting" demo: a Haar-cascade detection (hand / fist /
	// palm / face / eyes / nose, selectable at runtime) drives a brush
	// cursor; on-screen buttons pick colour, thickness, clear and save.

	// Haar cascade classifiers initialization
	CascadeClassifier hand_cascade("hand3.xml");
	CascadeClassifier hand_cascade1("fist1.xml"); // NOTE(review): loaded but never used below
	CascadeClassifier hand_cascade2("fist2.xml");
	CascadeClassifier hand_cascade3("palm1.xml");
	CascadeClassifier hand_cascade4("palm2.xml");
	CascadeClassifier face_cascade("face1.xml");
	CascadeClassifier eye_cascade("haarcascade_mcs_eyepair_small.xml");
	CascadeClassifier nose_cascade("haarcascade_mcs_nose.xml");

	VideoCapture cap(1);	// Capture source - default is 0

	// working mode switches
	bool useHaar = true;     // Haar detection drives the cursor ('h' toggles)
	bool useSkinDet = false; // additionally show skin-colour masks ('g' toggles)

	int cascadeIndex = 1; // current cascade index (1..7, SPACE cycles)
	char waitkey = '-';   // last read key - when not clicked is '-'
	// NOTE(review): cv::waitKey returns -1 when no key is pressed; as a char
	// that is not '-', so the `case '-'` arm below never actually fires --
	// the switch simply matches nothing, which is harmless.

	int backgroundFrame = 500; // number of frames to initialize background

					// Materials:
	Mat frame;		// one frame of capture
	Mat back;		// background image
	Mat fore;		// foreground image
	Mat detection;	// copy of the frame fed to the detectors
	Mat image;		// our painting canvas

					// Skin color detection masks:
	Mat ycbcr;		// YCbCr
	Mat hsv;		// Hue Saturation Value

	// Background subtractor (configured here but not applied in the loop)
	BackgroundSubtractorMOG2 bg;
	bg.set("nmixtures", 3);
	bg.set("detectShadows", false);

	// Windows shown - not needed here:
	namedWindow("Frame", WINDOW_NORMAL);
	namedWindow("Drawing", WINDOW_AUTOSIZE);
	//namedWindow("Detection");
	//namedWindow("Background");
	//namedWindow("HSV");
	//namedWindow("YCBCR");

	// Cursor position and frame size
	int posX = 0;
	int posY = 0;
	int lastX;
	int lastY;
	int frameH;
	int frameW;

	// Brush colour channels.  NOTE(review): the names are misleading -- the
	// values are passed as Scalar(R, B, G), i.e. in OpenCV's BGR order the
	// value held in R is blue, B is green and G is red; the assignments in
	// the button handlers below compensate for that, so behaviour is correct.
	int R = 0;
	int B = 0;
	int G = 0;
	int thickness = 3; // default brush size

	// Read background for our painting
	image = imread("white2.png", CV_LOAD_IMAGE_COLOR);
	if (!image.data)
	{
		cout << "Nie mo¿na otworzyæ obrazu!" << std::endl;
		return -1;
	}

	// not haar
	int no_of_fingers = 0;
	vector<vector<Point>> contours;

	vector<Point> palm_points;
	vector<pair<double, int> > distvec;
	vector<pair<Point, double> > palm_centers; // Center of tracked object


	//-------------------------------------------------------------------------------------//
	while(1)
	{
		cap >> frame;
		cap >> detection;

		flip(frame, frame, 1);         // mirror the video
		flip(detection, detection, 1); // mirror the detection copy too

		frameH = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
		frameW = cap.get(CV_CAP_PROP_FRAME_WIDTH);

		//SKIN COLOR DETECTION
		if (useSkinDet)
		{
			// BUGFIX: the original called flip() on hsv/ycbcr *before* they
			// were ever written, which throws on the first frame after this
			// mode is enabled.  The flips were redundant anyway: `detection`
			// is already mirrored and cvtColor overwrites both Mats.
			cvtColor(detection, hsv, CV_BGR2HSV);
			cvtColor(detection, ycbcr, COLOR_BGR2YCrCb);

			// keep only pixels within typical skin-tone ranges
			inRange(hsv, Scalar(0, 48, 80), Scalar(20, 255, 255), hsv);
			inRange(ycbcr, Scalar(0, 133, 77), Scalar(255, 173, 127), ycbcr);
		}

		// On-screen UI: colour buttons on the left edge, clear / save /
		// thickness buttons on the right edge.
		CreateTextButton(frame, Point(0, 0), Point(100, 100), Scalar(255, 255, 255), "KOLOR", Point(5, 50));
		CreateTextButton(frame, Point(0, 100), Point(100, 200), Scalar(0, 255, 0), "KOLOR", Point(5, 150));
		CreateTextButton(frame, Point(0, 200), Point(100, 300), Scalar(240, 17, 17), "KOLOR", Point(5, 250));
		CreateTextButton(frame, Point(0, 300), Point(100, 400), Scalar(0, 0, 255), "KOLOR", Point(5, 350));
		CreateTextButton(frame, Point(540, 0), Point(640, 100), Scalar(0, 0, 0), "USUN", Point(545, 60));
		CreateTextButton(frame, Point(540, 300), Point(640, 400), Scalar(0, 0, 0), "Zapisz", Point(545, 350));
		CreateLineButton(frame, Point(540, 100), Point(640, 200), Point(550, 150), Point(630, 150),9);
		CreateLineButton(frame, Point(540, 200), Point(640, 300), Point(550, 250), Point(630, 250),3);

		cv::Rect maxRect; // 0 sized rect
		std::vector<Rect> hands;

		// Cascade detection mode switch
		switch (cascadeIndex)
		{
		case 1:
			hand_cascade.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		case 2:
			hand_cascade2.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		case 3:
			hand_cascade3.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		case 4:
			// BUGFIX: was hand_cascade3 (copy-paste) -- mode 4 is palm2.xml.
			hand_cascade4.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		case 5:
			face_cascade.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(400, 400));
			break;
		case 6:
			eye_cascade.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		case 7:
			nose_cascade.detectMultiScale(detection, hands, 1.1, 2, 0 | CV_HAAR_FIND_BIGGEST_OBJECT, Size(50, 50), Size(300, 300));
			break;
		}

		lastX = posX; //save x position as last
		lastY = posY; //save y position as last

		if (useHaar == true)
		{
			// Draw circles on the detected hands; updates posX/posY
			DrawCircles(frame, hands, maxRect, posX, posY);
		}

		if (lastX != 0 && lastY != 0 && posX != 0 && posY != 0)
		{
			// ignore jumps larger than 30 px to suppress false detections
			if (std::abs(lastX - posX) < 30 && std::abs(lastY - posY) < 30)
			{
				// Draw the brush stroke.  (The original had two identical
				// line() calls in an if/else on useHaar -- merged.)
				line(image, Point(lastX, lastY), Point(posX, posY), Scalar(R, B, G), thickness, 2);

				// "USUN" button: save a snapshot, then reset the canvas
				if(posX > 540 && posX < 640 && posY<100 && posY>0)
				{
					SaveFile(image);
					image = imread("white2.png", CV_LOAD_IMAGE_COLOR);
					DisplayMessage(frame, Point(150, 120), "Wyczyszczono");
				}
				if (posX > 0 && posX < 100 && posY<100 && posY>0)
				{
					// white brush
					R = 255;
					B = 255;
					G = 255;
					DisplayMessage(frame, Point(150, 100), "Wybrano bialy");
				}
				if (posX > 0 && posX < 100 && posY<200 && posY>100)
				{
					// green brush (Scalar(0,255,0) in BGR)
					R = 0;
					B = 255;
					G = 0;
					DisplayMessage(frame, Point(150, 100), "Wybrano zielony");
				}
				if (posX > 0 && posX < 100 && posY<300 && posY>200)
				{
					// blue brush (Scalar(240,17,17) in BGR)
					R = 240;
					B = 17;
					G = 17;
					DisplayMessage(frame, Point(150, 100), "Wybrano niebieski");
				}
				if (posX > 0 && posX < 100 && posY<400 && posY>300)
				{
					// red brush (Scalar(0,0,255) in BGR)
					R = 0;
					B = 0;
					G = 255;
					DisplayMessage(frame, Point(150, 100), "Wybrano czerwony");
				}
				if (posX > 540 && posX < 640 && posY<200 && posY>100)
				{
					thickness = 9; // thick brush
					DisplayMessage(frame, Point(150, 100), "Grubo");
				}
				if (posX > 540 && posX < 640 && posY<300 && posY>200)
				{
					thickness = 3; // thin brush
					DisplayMessage(frame, Point(150, 100), "Mniej grubo");
				}
				if (posX>540 && posX < 640 && posY < 400 && posY>300)
				{
					SaveFile(image);

					DisplayMessage(frame, Point(150, 100), "Zapisano");
				}
			}
		}

		// Display updated frames
		imshow("Frame", frame);
		imshow("Drawing", image);

		// Display skin detection frames
		if (useSkinDet)
		{
			imshow("HSV", hsv);
			imshow("YCBCR", ycbcr);
		}

		// Keyboard handling
		waitkey = waitKey(1);
		switch (waitkey)
		{
			case '-':{
				break;
			}
			case ' ':{ // cycle through the cascade modes
				cascadeIndex++;
				if (cascadeIndex > 7)
				{
					cascadeIndex = 1;
				}
				cout << "ZMIANA TRYBU! " << cascadeIndex << endl;
				break;
			}
			case 's':{ // save the painting
				SaveFile(image);
				break;
			}
			case 'h':{ // toggle Haar-driven drawing
				if (useHaar == false)
				{
					cout << "ZMIANA TRYBU ON!" << endl;
					useHaar = true;
				}
				else
				{
					cout << "ZMIANA TRYBU OFF!" << endl;
					useHaar = false;
				}
				break;
			}
			case 'g':{ // toggle skin-colour detection preview
				if (useSkinDet == false)
				{
					cout << "ZMIANA TRYBU!" << endl;
					useSkinDet = true;
				}
				else
				{
					cout << "ZMIANA TRYBU!" << endl;
					useSkinDet = false;
				}
				break;
			}
			case 'q':{ // quit
				return 0;
			}
		}
		waitkey = '-';
	}
	return 0;
}
示例#13
0
int main()
{
    //경로지정하여 상 가져온다
	VideoCapture vc("C:\\Users\\youjin\\hyj\\video\\test1.avi");
	//VideoCapture capture;
    //capture.open(0);
      
    //capture.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    //capture.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
	if(!vc.isOpened())
    {
        cerr << "could not opened"<< endl;
        return 0;
    }
	
    namedWindow("original", 1); //원본영샹 창
    namedWindow("edited1", 2);  //이진화 띄우는 창 

    vector< vector<Point> > contours;
    BackgroundSubtractorMOG2 bg; //background 제거 - 배경을 사람으로부터 분리  

    // optical flow : 특정한 픽셀이 이동하는걸로 잡는 거 (rgb -> gray 로변환해서 )
    // 
    Mat gframe, pgframe; //grayframe , pregrayframe
    vector<Point2f> points1, points2; //vector.size() 로! 
    vector<uchar> status;
    vector<float> err;
    bool init = true;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20,0.03);
    Size subPixWinSize(10,10), winSize(31,31);

	Mat frame, tframe;
    Mat nframe(240, 320, CV_16UC3);
	Mat img; // 카운터 
	//Mat counter=imread("C:\\Users\\youjin\\hyj\\img\\counter.jpg"); //카운터 고정이미지
    Mat back, fore;
    bool fvalid = true;
    list<Mat> q;
	int *count; // count 포인터 객체배열 선언  --> points1이 vector<Point2f>로 정의되어있으니까 
	
    while(true)
    {
		//int i,k=0;
        fvalid = true;
        try
        {
			vc >> frame; //영상을 프레임에 넣기 
			//고정 카운터 이미지 합치기 
			//	Mat imageROI = frame(Rect(300,80,counter.rows,counter.cols));
			//addWeighted(imageROI,1.0,counter,0.3,0,imageROI);

            bg.operator() (frame, fore,0);
  
            erode(fore, fore, Mat()); //침식->노이즈 제거
			erode(fore, fore, Mat());
            //dilate(fore, fore, Mat());  //팽창->노이즈 제거 

            findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
            drawContours(frame, contours, -1, Scalar(255,255,255),CV_FILLED);
			//thinning(fore,fore);
			// color transformation for optical flow
            cvtColor(frame, gframe, CV_BGR2GRAY);

            // gray scale to binary image 
            threshold(gframe, tframe, 250,255,0);

            
            // Harris corner detection 
			// harris corner : corner detect -> goodfeaturestrack으로 구현해놓은거다
			//코너가 더 두드러짐
			// 옵티컬 플로우할때 현재값이랑 preview값을 넣어줘 - > 비교하니까 initialize
            if(init){
				goodFeaturesToTrack(gframe, points1, 500, 0.01, 5, Mat(), 3, 0, 0.04); //point1의 크기가 결정!!!!!
				//point1의 크기가 결정된 다음에 count의 포인터배열을 할당하라 
				count = new int[points1.size()];
				init=false;
				//count = new int[points1.size()];
			}else if(!points2.empty()){
                //optical flow function.
				//points2 -> pgframe이 담고 points1 -> gframe이 담는다 
                calcOpticalFlowPyrLK(pgframe, gframe, points2, points1, status, err, winSize, 3, termcrit, 0, 0.001);
	
				//at 메소드 : 각 원소에 접근하기 위해 at(int x, int y)메소드 이용 
				// 컴파일시 메소드가 반환하는 타입을 알아야지  --> 행렬 내 타입과 일치하는지 아닌지 확인 
             for(i= k = 0; i < points2.size(); i++) {
                 // if point is inside of object
                 if( (int)tframe.at<uchar>(points1[i].x, points1[i].y) == 255) //gray에는 255가 하얀색 
                 {
					if ((points1[i].x - points2[i].x) > 0) //후 -> 전 (이동했니?)
					{
                     line(frame, points1[i], points2[i], Scalar(0, 0, 255), 1, 1, 0);
					 circle(frame, points1[i], 2, Scalar(255, 0, 0), 1, 1, 0);
                     line(gframe, points1[i], points2[i], Scalar(0, 0, 255), 1, 1, 0);
                     circle(gframe, points1[i], 1, Scalar(255, 0, 0), 1, 1, 0);
                 }else{
                     line(frame, points1[i], points2[i], Scalar(0, 255, 0), 1, 1, 0);
                     circle(frame, points1[i], 2, Scalar(255, 0, 0), 1, 1, 0);
                     line(gframe, points1[i], points2[i], Scalar(0, 255, 0), 1, 1, 0);
                     circle(gframe, points1[i], 1, Scalar(255, 0, 0), 1, 1, 0);
                 }
                  points1[k++] = points1[i]; //
                 }
             }
              goodFeaturesToTrack(gframe, points1, MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
            }
			
			// 여기서부터 point1 edit 하면 된다 
			for(int j=0;j<points1.size();j++){
			//여기다가 if 문 돌려
				//count = new int[points1.size()];
				if((points1[j].x >=0 || points1[j].x <=500)||(points1[j].y >=0 || points1[j].y <=200) ){
					count[j]++;
					if(count[j]>0) //while문에 한번돌아갈때가 한프레임 한 점마다 150 프레임동안 카운트가 쌓이면 알람울려 
						cout << "beeeeeep" ;
						Beep(510,400); //1000 - 1초   --> 0.4초   "도"
				}else{
					//count[j]=0;
					//delete [] count;//사각형 밖으로 나간 점이있으면  -> 0 으로 초기화
				}
			//delete [] count;
			}
            
			std::swap(points2, points1); //찾은거바꾸고
            points1.clear(); //오래된거 없애고 
            gframe.copyTo(pgframe); 
			//delete [] count;
        }
        catch(Exception& e)
        {
            cerr << "exception : " << e.err << endl;
        }

        if(fvalid)
        {
            try
			{
				imshow("original", gframe);
                imshow("edited1", tframe); //이진화된거 
            }
            catch(Exception& e)
            {
                cerr << "exception : " << e.err << endl;   
            }
        }   
        if(waitKey(30) >= 0)
			break;
    }
	delete [] count;
}
示例#14
0
int main(){
	// Tracks moving objects in "movie.wmv" with MOG2 background subtraction,
	// draws bounding boxes on large contours, and smooths the box position
	// with a constant-velocity Kalman filter (raw vs. estimated points are
	// plotted on the KalmanWin canvas).
	Mat frame, fore, back, medFilt, threshed, clone_threshed;
	//read the input video file frame by frame
	VideoCapture vid ("movie.wmv");
	if (!vid.isOpened()){
		cout << "Error opening media file " << endl;
		exit(EXIT_FAILURE);
	}
	//Take the initial image for getting the centers and other static information
	Mat initialImage;
	vid >> initialImage;
	Point2f center_point;
	if(initialImage.rows == 0){
		cout << "Faliure to read the initial image" << endl;
		exit (EXIT_FAILURE);
	}
	center_point.x = initialImage.cols/2;
	center_point.y = initialImage.rows/2;
	cout <<"The center point of the static image is :" <<center_point << endl;

	// Set the dilation and erosion masks (5x5 elliptical kernels)
	Mat d_dilate = getStructuringElement( MORPH_ELLIPSE, Size (5,5), Point(0,0));
	Mat e_erode = getStructuringElement( MORPH_ELLIPSE, Size (5,5), Point(0,0));

	// Create a MOG2 based background subtraction object
	BackgroundSubtractorMOG2 bg;

	// Kalman filter: 4 state variables (x, y, vx, vy), 2 measurements (x, y)
	Mat KalmanWin = Mat::zeros(initialImage.rows, initialImage.cols, CV_8UC3);
	KalmanFilter KF(4,2,0);

	// constant-velocity transition model
	KF.transitionMatrix = *(Mat_<float>(4,4) << 1,0,1,0,  0,1,0,1, 0,0,1,0, 0,0,0,1 );
	Mat_<float> measurement(2,1);
	measurement.setTo(Scalar(0));

	// start the filter at the frame centre with zero velocity
	KF.statePre.at<float>(0) = center_point.x;
	KF.statePre.at<float>(1) = center_point.y;
	KF.statePre.at<float>(2) = 0;
	KF.statePre.at<float>(3) = 0;

	setIdentity(KF.measurementMatrix);
	setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
	setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
	setIdentity(KF.errorCovPost, Scalar::all(0.1));
	// set all the named Windows here
	namedWindow("VideoFrame", CV_WINDOW_AUTOSIZE);
	namedWindow("Processed1", CV_WINDOW_AUTOSIZE);
	namedWindow("Processed2", CV_WINDOW_AUTOSIZE);


	//============================================================================//
	//======================The main while loop===================================//
	//============================================================================//

	while (true){
		std :: vector <std :: vector <cv :: Point>> contours;
		std :: vector<Vec4i> hierarchy;
		vid >> frame;
		if (frame.rows == 0){
			// BUGFIX: end of stream -- the original only destroyed the
			// windows and kept looping, so the next bg.operator() call ran
			// on an empty frame and threw.  Stop the loop instead.
			destroyAllWindows();
			break;
		}
		// foreground mask -> median blur -> open/close -> binary threshold
		bg.operator() (frame, fore);
		medianBlur(fore, medFilt, MEDIAN_FILTER_MASK);
		erode(medFilt, medFilt, e_erode);
		dilate(medFilt, medFilt, d_dilate);
		dilate(medFilt, medFilt, d_dilate);
		threshold(medFilt, threshed, BINARY_THRESHOLDING_VALUE,255,THRESH_BINARY);
		clone_threshed = threshed.clone(); // findContours modifies its input
		findContours(clone_threshed, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE,Point(0,0));
		vector <Rect> boundRect (contours.size());
		for (size_t i = 0; i < contours.size(); ++i){
			// only track contours large enough to be a real object
			if (contourArea(contours[i]) > AREA_THRESHOLD ){
				boundRect[i] = boundingRect(Mat(contours[i]));
				rectangle(frame, boundRect[i].tl(), boundRect[i].br(), Scalar(0,0,255), 2,8,0);
				circle(KalmanWin, boundRect[i].tl(), 5, Scalar(255,0,0), -1, 8); // raw measurement (blue)
				// predict, then correct with the box's top-left corner
				Mat prediction = KF.predict();
				Point predictPoint( prediction.at<float>(0), prediction.at<float>(1) );
				measurement(0) = boundRect[i].tl().x;
				measurement(1) = boundRect[i].tl().y;
				Point measPoint(measurement(0), measurement(1));
				Mat estimated = KF.correct(measurement);
				Point statepoint(estimated.at<float>(0), estimated.at<float>(1));
				Point estimatedBR;
				estimatedBR.x	= statepoint.x ;
				estimatedBR.y   = statepoint.y ;
				circle(KalmanWin, estimatedBR, 1, Scalar(0,255,0), -1, 8); // filtered estimate (green)
			}
		}

		imshow("Processed1", threshed);
		imshow("Processed2", KalmanWin);
		//imshow("VideoFrame", frame);
		waitKey(22);
	}
	return 0;
}
int main(int argc, const char *argv[])
{


	VideoCapture cap("/home/mac/Documents/PROJECT/Training_Sets/Father/father_divya (1).mp4");
	noframes=1;
	Mat3b frame;
	 cv::Mat frame1;
	 cv::Mat back;
	 cv::Mat fore;
	 std::vector<std::vector<cv::Point> > contours;
	 BackgroundSubtractorMOG2 bg;
	// bg.nmixtures = 3;
	 //bg.bShadowDetection = false;

	 int options=1,hands_count=0;
	double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
	double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
	Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
	//VideoWriter oVideoWriter ("/home/mac/Documents/PROJECT/Output/Friend_t.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
	//VideoWriter oVideoWriter1 ("/home/mac/Documents/PROJECT/Output/HSV2_s.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
	Mat trace = Mat::zeros( Size(dWidth,dHeight), CV_8UC3 );
	bool flag=true;
	while(cap.read(frame ) and (options==1 or options==2))
	{
		noframes+=1;
		//if(noframes<5) continue;
		skin = GetSkin(frame);
		cvtColor(skin,skin,CV_RGB2GRAY);
		skin1 = skin> 50;
		blur( skin1, skin1, Size(3,3) );
		char* source_window = "Source";
		src_gray=skin1;
		Mat output;
		Point array[3];int sz=0;
		frame1=frame;
		skin1=draw_contour(src_gray,array,sz,frame1);
		bg.operator()(frame1,fore);
		bg.getBackgroundImage(back);
		cv::erode(fore,fore,cv::Mat());
		cv::dilate(fore,fore,cv::Mat());
		cv::findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
		cv::drawContours(frame1,contours,-1,cv::Scalar(0,0,255),2);
		cv::imshow("Frame",frame1);
		cv::imshow("Background",back);


		if(sz and flag)
		{
			flag=false;
			face=array[0];
		}
		//imshow("image",human);

		face_map(array,sz);
		if(sz>1)
		{
			MyCircle(trace,array[0],0);
		//	imshow("draw",trace);
		//	 oVideoWriter.write(trace);
		}

		//Mat aux=thresh_callback(0,0);
		//drawKeypoints(skin1, keypoints, output);
		skin2=frame;
		blur( skin1, skin1, Size( 5, 5 ) );
		imshow(source_window, skin1);
		// oVideoWriter1.write(skin1);
		if(sz>2) hands_count++;
		if(sz>1)
		{

		}
		waitKey(50);

	}

	destroyAllWindows();
	int ch=0,single=1;
	if(hands_count>=5)
			single++;



    return 0;
}
示例#16
0
文件: main.cpp 项目: goldragoon/CCTV
int main() {
	
	cv::VideoCapture	cam;
	cv::VideoWriter		writer;
	cv::Mat			frame, thresh, fore;
        CvBlobs			blobs;
	vector<cv::Mat>		blobs_image;

	char				waitKey_exit;
	int					waitKey_delay;
	char*				window_name_main; 
	int					i, j; 
        bool				isRobbed = false; 
        BackgroundSubtractorMOG2        bg;

        clock_t frame_start, frame_end;
        clock_t ed_start, ed_end; 

        vector<thread> threads;
        int cores = thread::hardware_concurrency();
	// initialize
	waitKey_exit = 'q';
	waitKey_delay = 100;
	window_name_main = "규진이에게 샤오미 보조배터리를!!"; 
	
	cam.open( 0 );
	cv::namedWindow( window_name_main );
        cv::namedWindow("blobing");
	writer = cv::VideoWriter("temp.avi", CV_FOURCC('M', 'J', 'P', 'G'),
							15.0, Size(VIDEO_WIDTH, VIDEO_HEIGHT),
							true);


        for(int i = 0; i < 10; i++)
        {
            cv::namedWindow(to_string(i));
        }
	// main loop
        while( true ) {
                frame_start = clock(); 
                // get frame from cam
                cam >> frame;
                
                if(writer.isOpened() && isRobbed)
                {
                    //if 0 frame is written by writer, remove file.
                    //writer.write(frame);

                }
                // frame to be gray scale
                bg.operator()(frame, fore, 0);
                cv::threshold(fore,fore, 250, 255, 0);
                
                
                for(int j = 0; j < cores; j++) 
                {
                    auto code = [&]()
                            {
                                for(int i = 0; i < 3; i++)
                                {
                                    erode(fore, fore, Mat());
                                    dilate(fore, fore, Mat());
                                }
                           };
                    threads.push_back(thread(code));
                }
                for(thread& t: threads)
                {
                    if(t.joinable()) t.join();
                }

                
                blobs = getBlobs(&fore, &frame);
                getBlobMat(&frame, blobs, &blobs_image);
                // blobing test
                if( true ) {
                        cout <<blobs_image.size() << endl;
                        for(int i = 0; i < blobs_image.size(); i++)
                        {
                            cv::imshow(to_string(i), blobs_image[i]);

                        }
                }
                imshow(window_name_main, frame);
                imshow("blobing", fore);
                //VideoWriter call distructor automatically.
                //frame release
                // delay
                if (cv::waitKey(waitKey_delay) == waitKey_exit ) {
                        break; 
                }

                frame_end = clock();
                printf("%f ms\n", (double)(frame_end-frame_start)/ CLOCKS_PER_SEC);
        }

	return 0;
}