Example #1
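// Optical-flow object tracker: keeps a running-average background model,
// re-detects corners with the siftCorner helper whenever the validator
// rejects the tracked object, and records annotated frames to a timestamped AVI.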
int main( int argc, char* argv[] )
{
    VideoCapture cap;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
    Size winSize(10,10);


    bool needToInit =true;
    bool nightMode = false;
    bool reSift=true;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
    else if( argc == 2 )
        cap.open(argv[1]);

    if( !cap.isOpened() )
    {
        cout << "Could not initialize capturing...\n";
        return 0;
    }

    Mat gray, prevGray, image;
    vector<Point2f> points[2];
    VideoWriter wri;


    siftCorner cornerFinder;
    if(!cornerFinder.Init("track.config"))
    {
        cout<<"Can not Init cornerFinder"<<endl;
        return 0;
    }


    const string backGroundFilename="background.jpg";
    const float alpha=0.85;
    Mat backGround;
    backGround=imread(backGroundFilename,CV_LOAD_IMAGE_GRAYSCALE);

    backGroundModel bgModel;
    bgModel.Init(alpha,backGround);

    namedWindow("Track",1);
    const int step=1;

    reSiftValidate validator;
    Tracker *tracker=NULL;
    Object curObj;
    tracker=new LKTracker;
    //validator.init();
    //
    DECLARE_TIMING(myTimer);
    START_TIMING(myTimer);
    DECLARE_TIMING(siftTimer);
    for(;;)
    {
        Mat frame;
        for(int ii(0);ii<step;++ii)
        	cap >> frame;
        if( frame.empty() )
            break;

        frame.copyTo(image);
        cvtColor(image, gray, CV_BGR2GRAY);

	    if( nightMode )
            image = Scalar::all(0);

        medianBlur(gray,gray,3);
        bgModel.renewModel(gray);

        if( needToInit )
        {
            char fileNameBuffer[64];    // 30 was too small: "output/YYYY_MM_DD_HH_MM_SS.avi" needs 31 bytes
            time_t rawtime;
            struct tm * timeinfo;

            time ( &rawtime );
            timeinfo = localtime ( &rawtime );

            // tm_mon is zero-based, hence the +1
            snprintf(fileNameBuffer, sizeof(fileNameBuffer)
                    ,"output/%d_%d_%d_%d_%d_%d.avi"
                    ,timeinfo->tm_year+1900,timeinfo->tm_mon+1,timeinfo->tm_mday,timeinfo->tm_hour,timeinfo->tm_min,timeinfo->tm_sec);
            wri.open(fileNameBuffer,CV_FOURCC('X','V','I','D'),50,image.size(),true);
            if(!wri.isOpened())
            {
                cout<<"can not init the writer"<<endl;
                return 0;
            }
            needToInit = false;
            tracker->Init(gray);
        }

        if(reSift)
        {
            START_TIMING(siftTimer);
            cout<<"reSift"<<endl;

            Mat Mask;
            bgModel.substractModel(gray,Mask);
            reSift=false;

            cornerFinder.goodFeatures(gray,curObj,Mask);

            cout<<"reSift Done"<<endl;
            STOP_TIMING(siftTimer);
            tracker->setObject(curObj);
        }
        else
        {
        	tracker->Process(gray);
        	curObj=tracker->getObject();
        	curObj.draw(image);
            reSift=!validator.validate(curObj);
        }
        imshow("Track", image);
        wri<<image;

        char c;
        c=(char)waitKey(2);
        if( c == 27 )
            break;
        switch( c )
        {
        case 'r':
        case 'c':
        case 'R':
        case 'C':
            points[1].clear();
            reSift=true;
            cout<<"reSift Type four"<<endl;
            break;
        case 'n':
            nightMode = !nightMode;
            break;
        case ' ':
            waitKey(-1);
            break;
        default:
            ;
        }

        std::swap(points[1], points[0]);
        swap(prevGray, gray);
    }
    STOP_TIMING(myTimer);
    printf("Execution time: %f ms.\n", GET_TIMING(myTimer));
    printf("sift Execution time: %f ms.\n", GET_TIMING(siftTimer));
    printf("sift average Execution time: %f ms.\n", GET_AVERAGE_TIMING(siftTimer));

    if(tracker)
    	delete tracker;
    return 0;
}
Example #2
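// Stabilizes consecutive frames with estimateRigidTransform/warpAffine, fuses
// the stabilized frame difference with an HSV in-range mask, then finds
// contours in the result and reports the tracked object's position.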
	void TrackingRoutine()
	{
		int64 start, finish;
		start = getTickCount();
		capture.read(curr_bgr_frame);

		if (curr_bgr_frame.empty())
		{
			running = false;
			return; // I DON'T LIKE IT
		}

		Mat curr_hsv_frame;
		cvtColor(curr_bgr_frame, curr_hsv_frame, CV_BGR2HSV);
		cvtColor(curr_bgr_frame, curr_gray, CV_BGR2GRAY);
		vector <Point2f> prev_corner, cur_corner;
		vector <Point2f> prev_corner2, cur_corner2;
		vector <uchar> status;
		vector <float> err;

		goodFeaturesToTrack(prev_gray, prev_corner, 200, 0.1, 30);
		calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_corner, cur_corner, status, err);

		for (size_t i = 0; i < status.size(); i++) {
			if (status[i]) {
				prev_corner2.push_back(prev_corner[i]);
				cur_corner2.push_back(cur_corner[i]);
			}
		}
		// translation + rotation only
		if (prev_corner2.size() > 0 && cur_corner2.size() > 0)
		{
			current_transform = estimateRigidTransform(prev_corner2, cur_corner2, false); // false = rigid transform, no scaling/shearing
		}
		if (current_transform.rows == 0)
		{
			current_transform = previous_transform.clone();
		}

		///Diff Section
		Mat stabilized, stab_diff;
		warpAffine(prev_gray, stabilized, current_transform, prev_gray.size());
		absdiff(stabilized, curr_gray, stab_diff);
		AddToDebugImages(stab_diff, "stab_diff");

		Mat rotated_block(prev_gray.size(), prev_gray.type(), Scalar(255));
		int dx = current_transform.at<double>(0, 2);
		int dy = current_transform.at<double>(1, 2);
		int thickness = int(sqrt(dx*dx + dy*dy));
		rectangle(rotated_block, Rect(0, 0, rotated_block.cols, rotated_block.rows), Scalar(0), thickness);
		warpAffine(rotated_block, rotated_block, current_transform, rotated_block.size());
		bitwise_and(rotated_block, stab_diff, stab_diff);
		AddToDebugImages(rotated_block, "rotated-block");

		stab_diff = Close(stab_diff, "stab_diff");
		stab_diff = Blur(stab_diff, "stab_diff");
		stab_diff = Threshold(stab_diff, "stab_diff");


		//Color Section
		Mat hsv_in_range;
		inRange(curr_hsv_frame, hsv_min, hsv_max, hsv_in_range);
		AddToDebugImages(hsv_in_range, "hsv_in_range");

		hsv_in_range = Close(hsv_in_range, "hsv_in_range");
		hsv_in_range = Blur(hsv_in_range, "hsv_in_range");
		hsv_in_range = Threshold(hsv_in_range, "hsv_in_range");

		Mat hsv_in_expanded_range;
		Scalar hsv_min_expanded = hsv_min - HSV_RANGE_ADDER*(hsv_max - hsv_min);
		Scalar hsv_max_expanded = hsv_max + HSV_RANGE_ADDER*(hsv_max - hsv_min);
		inRange(curr_hsv_frame, hsv_min_expanded, hsv_max_expanded, hsv_in_expanded_range);
		AddToDebugImages(hsv_in_expanded_range, "hsv_in_expanded_range");

		Mat canny_output;
		vector<vector<Point> > canny_contours;
		vector<Vec4i> canny_hierarchy;

		Canny(curr_gray, canny_output, 80, 240, 3);
		/// Find contours
		findContours(canny_output, canny_contours, canny_hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));

		/// Draw contours
		Mat drawing = Mat::zeros(canny_output.size(), CV_8UC1);
		for (int i = 0; i < (int)canny_contours.size(); i++)
		{
			Scalar color = Scalar(255, 255, 255);
			drawContours(drawing, canny_contours, i, color, 2, 8, canny_hierarchy, 0, Point());
		}
		AddToDebugImages(canny_output, "contours");
		AddToDebugImages(drawing, "other_contours");

		//Union Section
		Mat raw_mask;
		//bitwise_and(diff_closed_blur_threshold, hsv_in_range, raw_mask);
		double lambda = 0.5;
		/*int corners_in_object = 0;
		for (Point2f corner : cur_corner2)
		{
		if ()
		}*/
		raw_mask = lambda*stab_diff + (1 - lambda)*hsv_in_range;
		AddToDebugImages(raw_mask, "raw_mask");

		raw_mask = Threshold(raw_mask, "raw_mask");
		raw_mask = Close(raw_mask, "raw_mask");
		//raw_mask = Blur(raw_mask, "raw_mask");
		raw_mask = Threshold(raw_mask, "raw_mask");

		Rect object_bounding_rectangle;
		Point2d last_position;
		vector< vector<Point> > contours;
		vector<Vec4i> hierarchy;

		findContours(raw_mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);  // retrieves external contours

		for (vector<Point> contour : contours)
		{
			object_bounding_rectangle = boundingRect(contour);
			rectangle(curr_bgr_frame, object_bounding_rectangle, Scalar(0, 150, 0));
		}
		int x_pos = -1;
		int y_pos = -1;


		if (contours.size() > 0)//stalefix. //TODO:find a better solution
		{
			object_bounding_rectangle = boundingRect(contours.back());
			rectangle(curr_bgr_frame, object_bounding_rectangle, Scalar(0, 0, 0));
			x_pos = object_bounding_rectangle.x + object_bounding_rectangle.width / 2;
			y_pos = object_bounding_rectangle.y + object_bounding_rectangle.height / 2;
			WritePosition(x_pos, y_pos);
		}

		finish = getTickCount();
		double fps = getTickFrequency() / (finish - start); // ticks-per-second over elapsed ticks = frames per second
		putText(curr_bgr_frame, to_string(fps), Point(10, 30), CV_FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 0));

		if (debug)
		{
			ShowDebugImages();
			char text[32];
			snprintf(text, sizeof(text), "x:%d y:%d", x_pos, y_pos); // fixed-size buffer avoids the leak and overflow of new char[10]
			putText(curr_bgr_frame, text, object_bounding_rectangle.tl(), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 0));
			
			for (int i = 0; i < prev_corner2.size(); i++)
			{
				arrowedLine(curr_bgr_frame, prev_corner2[i], cur_corner2[i], CV_RGB(0, 255, 0));
			}
		}
		else
		{
			destroyWindow(DEBUG_WINDOW);
		}
		if (recording)
		{
			if (tracking_recorder.isOpened())
			{
				tracking_recorder.write(curr_bgr_frame);
			}
			else throw runtime_error("tracking_recorder is not opened"); // std::exception has no string constructor in standard C++
			circle(curr_bgr_frame, Point(10, 10), 8, Scalar(0, 0, 255), -1);
		}
		if (mouse_is_dragging)
		{
			rectangle(curr_bgr_frame, initial_click_point, current_mouse_point, Scalar(0, 0, 0));
		}

		imshow(MAIN_WINDOW, curr_bgr_frame);
		previous_transform = current_transform.clone();
		prev_gray = curr_gray.clone();

	}
Example #3
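// Reads a video, binarizes each frame at a user-supplied threshold, overlays a
// text label (legacy IplImage API), and writes the result to an output video
// that reuses the input's codec, FPS and size.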
int main(int argc,char *argv[])
{
	string file_in,file_out;
	int threshold;

	if(argc >= 4){ // need input file, threshold and output file; fewer args fall back to prompts
		file_in = argv[1];
		string str = argv[2];
		threshold = atoi(str.c_str());
		file_out = argv[3];
	}else{
		cout<<"Please input the input file:";
		cin>>file_in;
		cout<<"Please input the threshold:";
		cin>>threshold;
		cout<<"Please input the output file:";
		cin>>file_out;
	}

    /** Open the input video file */
    cv::VideoCapture vc;
    vc.open(file_in);
    
    if ( vc.isOpened() )
    {
        /** Open the output video file */
        VideoWriter vw;
        vw.open(file_out, // output video file name
                (int)vc.get( CV_CAP_PROP_FOURCC ), 
                (double)vc.get( CV_CAP_PROP_FPS ), 
                cv::Size( (int)vc.get( CV_CAP_PROP_FRAME_WIDTH ), (int)vc.get( CV_CAP_PROP_FRAME_HEIGHT ) ), // video size
                false ); // whether to write a color video

        /** If the output video file opened successfully */
        if ( vw.isOpened() )
        {
            while ( true )
            {
                /** Read the current video frame */
                cv::Mat in;
				vc >> in;

                /** If the video is finished, break out of the loop */
                if ( in.empty() )
                {
                    break;
                }

				IplImage s = in; // original frame
				IplImage *color = &s;

				cvShowImage(file_in.c_str(), color ); // show the original frame

				char c = cvWaitKey(30); // wait

				//convert to grayscale
				IplImage *gray = cvCreateImage(cvGetSize(color),  8,1);
				cvCvtColor(color,gray,CV_BGR2GRAY);

				//convert to binary
				IplImage *binary = cvCreateImage(cvGetSize(gray),  8,1); // binary image
				cvThreshold(gray,binary,threshold,255,CV_THRESH_BINARY);


				//overlay text
				CvFont font;
				double hscale = 0.5;
				double vscale = 0.5;
				int linewidth = 1;
				cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX |  CV_FONT_ITALIC,hscale,vscale,0,linewidth); 
				CvScalar textColor =cvScalar(255,255,255);
				CvPoint textPos =cvPoint(0,15);
				cvPutText(binary,"Wang Zhefeng 3110000026", textPos, &font,textColor);

				//show the output image
				cvShowImage(file_out.c_str(), binary );

                /** Write the frame to the output video */
				Mat out(binary);
                vw << out;

				cvReleaseImage(&gray);   // release the per-frame images to avoid leaking memory
				cvReleaseImage(&binary);
            }
        }
    }

    return 0;
}
Example #4
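// Registers each frame against a central crop via the external match() helper,
// clips the measured offsets, and averages every 25 aligned frames into one
// normalized image that is appended to out1.avi.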
int main( int argc, char *argv[] ){
    int max_offset = 280;
    int cmp_width;
 
    char *name1 = argv[1];
 
    VideoCapture cap;
    VideoWriter out;
  
    cap.open(name1);

    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    out.open("out1.avi",
	     ex,
	     60,
	     Size(1280, 960),
	     false);
    char s[5];
    s[4] = 0;
    memcpy(s, &ex, 4);
    printf("%s\n", s); 
 
    if(!cap.isOpened())  // check if we succeeded
        return -1;
    Mat mat;
    
    int nframes = cap.get(CV_CAP_PROP_FRAME_COUNT);
   
    for (int i = 0; i < 5; i++) { 
	mat = GetFrame(cap);
    } 
    int sy, sx;
    
    sy = mat.size().height;
    sx = mat.size().width;
    cmp_width = sx/4; 
  
    Mat m0 = mat(Rect(300, 300, sx-600, sy-600)).clone();

    Mat sum = Mat::zeros(mat.size(), CV_MAKETYPE(CV_32F, mat.channels())); // accumulate in float to match the CV_32F-normalized frames

    double t = clock();
    int cnt = 0;
 
    for (int i = 0; i < nframes - 5; i++) {

	mat = GetFrame(cap);

    	m0 = mat(Rect(300, 300, sx-600, sy-600)).clone();

	Point loc1 = match(m0, mat);
        
	//cout << loc1; 
	int dx = loc1.x;
	int dy = loc1.y;
 
	char skip = 0;
	
	if (abs(dx) > max_offset || abs(dy) > max_offset) {
		skip = 1;
	}

	if (dx <= -max_offset) dx = -max_offset;
	if (dy <= -max_offset) dy = -max_offset; 
	if (dx >= max_offset) dx = max_offset;
	if (dy >= max_offset) dy = max_offset;	
	
	copyMakeBorder(mat, mat,
                       abs(dy),
                       abs(dy),
                       abs(dx),
                       abs(dx),
                       BORDER_CONSTANT, Scalar::all(0.0));

	Mat mx =  mat(Rect(abs(dx) + dx,
			   abs(dy) + dy,
			   sx, sy)); 
	
	normalize(mx, mx, 0, 1, NORM_MINMAX, CV_32F);
	
      
	waitKey(1);
	
	if (skip == 0) {
		sum = sum + mx;
	}
        
	cnt++;
	printf("%d\n", i);	
	if (cnt == 25) {
		normalize(sum, sum, 0, 1, NORM_MINMAX, CV_32F);	
 		//m0 = sum(Rect(300, 300, sx-600, sy-600)).clone();

		imshow("m0", sum);
		cnt = 0;
		Mat tmp;

		normalize(sum, tmp, 0, 255, NORM_MINMAX, CV_32F);
		tmp.convertTo(tmp, CV_8U);
		//imshow("tmp",tmp);	
		out << tmp;
		sum = Scalar(0);
	}
    }
}
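Example #5
// Reads an image sequence (IM-4559-%04d.png), lets the user pick a ROI with
// the mouse, applies MOG2 background subtraction to the ROI, and saves the
// Canny contours of every frame as a numbered PNG.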
int main(int argc, char *argv[])
{
    int frame_num = 0;
    int non_decode_frame = 0;
    int count = 1, idx = 0;
    int frame_pos = 0;
    int p = 0;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    MOG2 = createBackgroundSubtractorMOG2(); //MOG2 approach
    std::cout<<"Video File "<<argv[1]<<std::endl;
   // cv::VideoCapture input_video(argv[1]);
    cv::VideoCapture cap("IM-4559-%04d.png");
    namedWindow("My_Win",1);
    cvSetMouseCallback("My_Win", mouse_click, 0);
    sleep(1);
    while(cap.grab())
    {
        cap_img.release();
        p++;
        if(cap.retrieve(cap_img))
        {
            imshow("My_Win", cap_img);
            if(!got_roi)
            {
                            //Wait here till user select the desire ROI
                waitKey(0);
            }
            else
            {
                std::cout<<"Got ROI disp prev and curr image"<<std::endl;
                std::cout<<"PT1"<<pt1.x<<" "<<pt1.y<<std::endl;
                std::cout<<"PT2"<<pt2.x<<" "<<pt2.y<<std::endl;
			
		if(vw.isOpened()){
			std::cout<<"VW Opened\n";
		}
                Mat curr_img_t1;
                Mat roi2(cap_img,Rect(pt1, pt2));
                Mat curr_imgT = roi2.clone();
      		MOG2->apply(curr_imgT, fgMaskMOG2);
        	//get the frame number and write it on the current frame
        	stringstream ss;
        	rectangle(curr_imgT, cv::Point(10, 2), cv::Point(100,20),cv::Scalar(255,255,255), -1);
        	ss << vw.get(CAP_PROP_POS_FRAMES);
        	string frameNumberString = ss.str();
        	putText(curr_imgT, frameNumberString.c_str(), cv::Point(15, 15),
                FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
		float morph_size = 2;
		Mat grad;
		Mat canny_output;
		Mat thrld;
		Mat element = getStructuringElement(MORPH_RECT,Size(2*morph_size+1,2*morph_size+1), Point(morph_size,morph_size));
                morphologyEx(fgMaskMOG2,grad,MORPH_GRADIENT,element, Point(-1,-1),1);
		Canny(curr_imgT, canny_output, thresh,thresh*2 , 3);	
		findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
  for( int i = 0; i< contours.size(); i++ )
     {
       Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
       drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
     }
	namedWindow( "Contours", WINDOW_AUTOSIZE );
  imshow( "Contours", drawing );
		threshold(grad,thrld,200,0,3);
		char file_name[100];
		sprintf(file_name, "final%d.png",p);
        	//show the current frame and the fg masks
                imwrite(file_name,drawing);
		imshow("background", fgMaskMOG2);
                waitKey(30);
                        }
                  }
        std::cout << p << std::endl;
    }
    vw.release();
}
Example #6
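// State-machine tracker: captures from a file or the default camera, optionally
// computes LBP images for the observation model (StateData is built from
// num_particles and use_lbp), handles keyboard commands, and blits the current
// target patch into the frame corner.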
int main(int argc, char** argv)
{
   Options o;
   parse_command_line(argc, argv, o);

   bool use_camera;
   VideoCapture cap;
   VideoWriter writer;

   // Use filename if given, else use default camera
   if( !o.infile.empty() )
   {
      cap.open(o.infile);
      use_camera = false;
   }
   else
   {
      cap.open(0);
      use_camera = true;
   }

   if( !cap.isOpened() )
   {
      cerr << "Failed to open capture device" << endl;
      exit(2);
   }

   if( !o.outfile.empty() )
   {
      int fps = cap.get(CV_CAP_PROP_FPS);
      int width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
      int height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
      writer.open(o.outfile, CV_FOURCC('j', 'p', 'e', 'g'), fps, Size(width, height));
      if( !writer.isOpened() )
      {
	 cerr << "Could not open '" << o.outfile << "'" << endl;
	 exit(1);
      }
      use_camera = false;
   }

   // Open window and start capture
   namedWindow(WINDOW, CV_WINDOW_FREERATIO | CV_GUI_NORMAL);


   StateData d(o.num_particles, o.use_lbp);
   State state = state_start;
   Mat frame, gray;

   lbp_init();

   // Main loop

   for(;;)
   {

      // Start timing the loop
      

      // Capture frame
      if( !d.paused)
      {
	 cap >> frame;
	 if(frame.empty())
	 {
	    cerr << "Error reading frame" << endl;
	    break;
	 }
      }
      if( use_camera )
      {
	 flip(frame, d.image, 1);
      }
      else
      {
	 frame.copyTo(d.image);
      }
      
      // Set up all the image formats we'll need
      if(d.use_lbp)
      {
	 cvtColor(d.image, gray, CV_BGR2GRAY);
	 lbp_from_gray(gray, d.lbp);
      }
      else
      {
	 if( d.lbp.empty() )
	    d.lbp = Mat::zeros(d.image.rows, d.image.cols, CV_8UC1);
      }

      // Handle keyboard input
      char c = (char)waitKey(10);
      if( c == 27 )
	 break;
      switch(c)
      {
	 case 'p':
	    d.paused = !d.paused;
	    break;

	 case 'c':
	    cout << "Tracking cancelled." << endl;
	    state = state_start;
	    break;

	 case 'd':
	    d.draw_particles = !d.draw_particles;
	    cout << "Draw particles: " << d.draw_particles << endl;
	    break;
      }

      // Process frame in current state
      state = state(d);


      // Elapsed time in seconds
/*
      timeval end_time;
      gettimeofday(&end_time, 0);
      float dt = (float)(end_time.tv_sec - start_time.tv_sec) + ((float)(end_time.tv_usec - start_time.tv_usec)) * 1E-6;       
      cout << "Frame rate: " << 1.f / dt << endl;
*/
      Mat target_display_area(d.image, Rect(d.image.cols - d.selection.width, 0, d.selection.width, d.selection.height));
      d.target.copyTo(target_display_area);
  

      imshow(WINDOW, d.image);
   }

   return 0;
}
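Example #7
// Live filter demo: a keystroke picks thresholding, Canny edges, refined
// corners (goodFeaturesToTrack + cornerSubPix), frame differencing, or Hough
// line detection; the chosen output is displayed and written to Detectors.avi.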
int main(int argc, char* argv[]){

	VideoCapture cap(0); // open the default camera
	if(!cap.isOpened()){ // check if we succeeded
		return -1;
	}

	VideoWriter VOut;
	VOut.open("Detectors.avi", CV_FOURCC('M', 'P', 'E', 'G'), 30, Size(640, 480), 1);

	Mat edges;
	namedWindow("Screen",1);
	//namedWindow("original", 1);
	char setting = 'z'; // Canny default
	Mat frame;
	Mat gray;
	Mat last_frame;

	cout << "Help: \n" << "t -- threshold\n" << "c -- canny\n" << "x -- corners\n" << "d -- diff\n" << "Any other key -- lines\n" << endl;

	for(;;){  // Loop through, getting new images from the camera.
		Mat thresh_out;
		Mat canny_out;
		Mat corner_out;
		Mat out;
		//cout << "Setting " << setting + 'a' << endl;
		cap >> frame; // get a new frame from camera
		if (last_frame.empty()){
			cvtColor(frame, last_frame, CV_BGR2GRAY);
		}else{
			last_frame = gray.clone();  // Which was the last gray frame, before I update it. 
		}
		cvtColor(frame, gray, CV_BGR2GRAY);
		if (setting == 't'){
	// Threshold
			threshold(gray, out, 100, 255, THRESH_BINARY); // Threshold: 100. White value: 255
		}else if (setting == 'c'){
	// Canny
			GaussianBlur(gray, edges, Size(7,7), 1.5, 1.5);
			Canny(edges, out, 5, 30, 3);
		}else if (setting == 'x'){
	// Corners
	 		vector<Point2f> corners;
	 		double qualityLevel = 0.04;
			double minDistance = 30;
			int blockSize = 5;
			bool useHarrisDetector = false;
			double k = 0.04;
			int maxCorners = 100;

			corner_out = gray.clone();

			goodFeaturesToTrack( gray, corners, maxCorners, qualityLevel,
			               minDistance, Mat(), blockSize, useHarrisDetector, k );

			int r = 4;
			Size winSize = Size( 5, 5 );
			Size zeroZone = Size( -1, -1 );
			TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );

			/// Calculate the refined corner locations
			cornerSubPix( gray, corners, winSize, zeroZone, criteria );

	 	    for( int i = 0; i < corners.size(); i++ )
			   { circle( corner_out, corners[i], r, Scalar(255, 255, 255), -1, 8, 0 ); }
			out = corner_out;
		}else if (setting == 'd'){
	// Diff
			absdiff(gray, last_frame, out);
		}
		else{
	// Lines
			GaussianBlur(gray, edges, Size(7,7), 1.5, 1.5);
			Canny(edges, canny_out, 7, 30, 3);
			vector<Vec4i> lines;
			  HoughLinesP(canny_out, lines, 1, CV_PI/180, 50, 50, 10 );
			  for( size_t i = 0; i < lines.size(); i++ )
			  {
				Vec4i l = lines[i];
				line( edges, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
			  }
			out = edges;
		}

		char key = waitKey(30);
		if (key != 0 && key != -1){
			//cout << "setting is " << key + 0<< endl;
			setting = key;
			key = 0;
		}
	
		imshow("Screen", out);  // Show the image on the screen
		VOut << out;
		//imshow("original", gray);
		//if(waitKey(30) >= 0) break;
	}

// the camera will be deinitialized automatically in VideoCapture destructor

return 0;
}
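Example #8
// Motion grouping with sparse optical flow: tracked points accumulate velocity
// histories, moving points are clustered into objects by velocity similarity,
// and the largest tracked group is highlighted and optionally recorded.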
int main( int argc, char** argv )
{
	ProcessOptions(argc, argv);

	cvRedirectError(cvStdErrReport);

    namedWindow( WindowName, 1 );
//    setMouseCallback( "LK Demo", onMouse, 0 );

    Mat image;
    Mat rsImage;
    ShowMsg("Waiting...");
    
    const int FIND=0;
	const int ACQUIRE=1;
	const int TRACK=2;

	const int fontFace = FONT_HERSHEY_PLAIN;
	const double fontScale = 1.5;
	const int thickness=1;

    const int MAX_COUNT = 500;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
    Size subPixWinSize(10,10), winSize(31,31);

    int state = FIND;
    vector<Point2f> trackPoints[2];
    vector<uchar> trackStatus;

    Mat prevGrey;
	Mat grey;
	map<unsigned int, TrackedPoint> trackedPoints;

	map<unsigned int, TrackedGroup> trackedGroups;
    int largestTrackedGroup = -1;

    double totalTime=0;
    double findFeatureTime=0;
    double lkTime=0;
    double featureProcessTime=0;
    int nframes = 0;
    for(;;)
    {
    	if(cap.isOpened()) {

    		nframes++;

			Mat frame;
			cap >> frame;
			if( frame.empty() )
				break;	// TODO: handle properly

			if(IsCam) {
				cv::resize(frame, rsImage, Size(), 0.25, 0.25);
			}
			else {
				frame.copyTo(rsImage);
			}

			cvtColor(rsImage, grey, CV_BGR2GRAY);


			// Image processing here
			if(state==FIND) {
				printf("State=FIND, clear tracked groups\n");
				// No groups tracked
				trackedGroups.clear();
				largestTrackedGroup = -1;

				// Find points to track
				double tFindFeatureStart = getTimeUsecs();

				goodFeaturesToTrack(grey, trackPoints[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
	            cornerSubPix(grey, trackPoints[1], subPixWinSize, Size(-1,-1), termcrit);

	            findFeatureTime += getTimeUsecs() - tFindFeatureStart;


	            if(trackPoints[1].size() > 0) {
	            	state = ACQUIRE;
	            }
			} else if(state==ACQUIRE || state==TRACK) {
				// Got some points, do a Lukas-Kanade
				printf("State=%d, Lukas-Kanade\n", state);

	            vector<float> err;
	            if(prevGrey.empty())
	                grey.copyTo(prevGrey);

	            double tLkStart = getTimeUsecs();

	            calcOpticalFlowPyrLK(prevGrey, grey, trackPoints[0], trackPoints[1], trackStatus, err, winSize,
	                                 3, termcrit, 0, 0.001);

	            lkTime += getTimeUsecs() - tLkStart;

	            double tFeatureProcessStart = getTimeUsecs();

	            // Calculate velocities for tracked points
	            for(unsigned int n=0; n<trackStatus.size(); n++) {
	            	if(trackStatus[n]) {
	            		Point2f dp = Point2f(trackPoints[1][n].x - trackPoints[0][n].x, trackPoints[1][n].y - trackPoints[0][n].y);

	            		if(trackedPoints.count(n)) {
        					TrackedPoint &tp = trackedPoints[n];

        					tp.pos = trackPoints[1][n];
	            			tp.velHistory[tp.histIdx++] = dp;
	            			if(tp.histIdx >= NUM_VELOC_FRAMES) {
	            				tp.histFill = NUM_VELOC_FRAMES;
	            				tp.histIdx = 0;
	            			} else if(tp.histFill < NUM_VELOC_FRAMES) {
	            				tp.histFill = tp.histIdx;
	            			}

	            			tp.UpdateAverageVelocity();
	            		}
	            		else {
	            			// no point yet
	            			TrackedPoint newtp(dp);
        					newtp.pos = trackPoints[1][n];

	            			trackedPoints[n] = newtp;
	            		}

	            		TrackedPoint &dtp = trackedPoints[n];
//	            		printf("Tracked point %d: (obj %d, moving %d [%f,%f], hf=%d) (%f,%f)\n",
//	            				n, dtp.objectId, dtp.IsMoving(), dtp.averageVel.x, dtp.averageVel.y,
//	            				dtp.histFill,
//	            				trackPoints[1][n].x, trackPoints[1][n].y);

	            	}
	            }

	            // Identify (new) objects
	            bool averageDone = false;
	            for(map<unsigned int, TrackedPoint>::iterator it=trackedPoints.begin(); it != trackedPoints.end(); it++) {

	            	TrackedPoint &tp = (*it).second;

	            	averageDone |= tp.HasValidAverage();

	            	if(tp.objectId < 0 && tp.IsMoving()) {
	            		// Not bound to an object and is moving

	            		for(map<unsigned int, TrackedPoint>::iterator iit=trackedPoints.begin(); iit != trackedPoints.end(); iit++) {
	            			TrackedPoint &itp = (*iit).second;
	            			if(itp.objectId >= 0 && itp.IsMoving()) {
	            				if(itp.CalcVelDiff(tp) < GROUP_THRESH) {
	            					// We are in the same object
	            					tp.objectId = itp.objectId;
	    	            			printf("Found object %d for point %d (%f,%f) vel (%f,%f)\n",
	    	            					tp.objectId, (*it).first, tp.pos.x, tp.pos.y, tp.averageVel.x, tp.averageVel.y);
	            					break;
	            				}
	            			}
	            		}

	            		if(tp.objectId < 0) {
	            			// No connected object found
	            			static int nextObjectId = 0;
	            			tp.objectId = nextObjectId++;
	            			printf("Added new object %d for point %d (%f,%f) vel (%f,%f)\n\n",
	            					tp.objectId, (*it).first, tp.pos.x, tp.pos.y, tp.averageVel.x, tp.averageVel.y);
	            		}
	            	}
	            }

	            // zero point counters and sum
	            for(map<unsigned int, TrackedGroup>::iterator ig= trackedGroups.begin(); ig != trackedGroups.end(); ig++) {
	            	TrackedGroup &tg = (*ig).second;
	            	tg.nPoints = 0;
	            	tg.sum = Point2f(0,0);
	            	tg.max = Point2f(-INFINITY,-INFINITY);
	            	tg.min = Point2f(INFINITY,INFINITY);
	            }

	            for(map<unsigned int, TrackedPoint>::iterator it=trackedPoints.begin(); it != trackedPoints.end(); it++) {
					TrackedPoint &tp = (*it).second;
					if(tp.objectId >= 0 && tp.IsMoving()) {
						if(trackedGroups.count(tp.objectId)) {
							TrackedGroup &tg = trackedGroups[tp.objectId];
							tg.nPoints++;
							tg.sum.x += tp.pos.x;
							tg.sum.y += tp.pos.y;
							if(tg.max.x < tp.pos.x) tg.max.x = tp.pos.x;
							if(tg.max.y < tp.pos.y) tg.max.y = tp.pos.y;
							if(tg.min.x > tp.pos.x) tg.min.x = tp.pos.x;
							if(tg.min.y > tp.pos.y) tg.min.y = tp.pos.y;
						}
						else {
							TrackedGroup newtg;
							newtg.nPoints = 1;
							newtg.sum = tp.pos;
							newtg.max = tp.pos;
							newtg.min = tp.pos;
							trackedGroups[tp.objectId] = newtg;
						}
					}
	            }

	            unsigned int npMax = 0;
	            for(map<unsigned int, TrackedGroup>::iterator ig= trackedGroups.begin(); ig != trackedGroups.end(); ig++) {
	            	TrackedGroup &tg = (*ig).second;
	            	tg.centre.x = tg.sum.x / tg.nPoints;
	            	tg.centre.y = tg.sum.y / tg.nPoints;

	            	if(tg.nPoints > npMax) {
	            		npMax = tg.nPoints;
	            		largestTrackedGroup = (*ig).first;
	            	}
	            	// printf("  Tracked group %d: np=%d, c=%f,%f\n", (*ig).first, tg.nPoints, tg.centre.x, tg.centre.y);
	            }


	            if(trackedGroups.size() > 0) {
	            	printf("%d tracked groups; state=>TRACK\n", (int)trackedGroups.size());
	            	state = TRACK;
	            }
	            else if(averageDone) {
	            	printf("Zero tracked groups after average, state=>FIND\n");
	            	state = FIND;	// reset the state if we have averaged points but nothing tracked
	            }
	            else {
	            	printf("Averaging, state=%d\n", state);
	            }

	            featureProcessTime += getTimeUsecs() - tFeatureProcessStart;
			}

			// debug
			putText(rsImage, state==0 ? "Find" : (state==1 ? "Acquire" : "Track"),
					Point(16,32), fontFace, fontScale,
			        Scalar::all(255), thickness, 8);

			if(largestTrackedGroup >= 0) {
				TrackedGroup &tg = trackedGroups[largestTrackedGroup];
	            printf("Largest tracked group %d, np=%d, c=%f,%f, mm=%f,%f, gs=%d,%d\n", largestTrackedGroup, tg.nPoints,
	            		tg.centre.x, tg.centre.y, tg.minMaxMean().x, tg.minMaxMean().y, tg.groupSize().width, tg.groupSize().height);
				ellipse(rsImage, tg.centre, Size(40,40), 0., 0., M_PI, Scalar(255,200,200), 4, 8, 0);
			}

            for(unsigned int i = 0; i < trackPoints[1].size(); i++ ) {
            	Scalar pointColour;
            	if((state==ACQUIRE || state==TRACK) && trackStatus.size() > i && trackStatus[i]) {
            		if(trackedPoints.count(i)) {
            			TrackedPoint &tp = trackedPoints[i];
            			if(tp.IsMoving()) {
                			pointColour = Scalar(0,255,0);
                        	// circle( rsImage, trackPoints[1][i], 3, pointColour, -1, 8);
            			}
            			else {
            				pointColour = Scalar(255,0,0);
            			}
            		}
            		else {
            			pointColour = Scalar(128,0,0);
            		}
				}
            	else {
            		 pointColour = Scalar(0, 0, 255);
            	}

            }

            std::swap(trackPoints[1], trackPoints[0]);
            std::swap(prevGrey, grey);

			imshow( WindowName, rsImage);

			// LastImageSize = cvSize((int)cap.get(CV_CAP_PROP_FRAME_WIDTH), (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT));
			LastImageSize = rsImage.size();

			if(Vwrite.isOpened()) {
				 Vwrite << rsImage;
			}

    	}


        if(!ProcessKeys())
        	break;
        
    }

    return 0;
}
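Example #9
// Keyboard handler shared by the tracker above: Esc quits, 'c'/'d' switch the
// capture to a camera, 'f'/'g' open an input file, and 's'/'t' start recording
// by opening the global VideoWriter.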
bool ProcessKeys() {

    char c = (char)waitKey(30);
    if( c == 27 ) {

        return false;
    }

    switch( c )
    {
    case 'c':
    case 'd':
    	// capture from cam
		{
			int camNum = (c=='c') ? 0 : 1;
			bool res = cap.open(camNum);
			if(res) {
				printf("Init capture from cam %d - ok\n", camNum);
			}
			else {
				printf("Init capture from cam %d - failed %s\n", camNum, cvErrorStr(cvGetErrStatus()));
			}

			cap.set(CV_CAP_PROP_CONVERT_RGB, 1.);
			IsCam = true;
		}
        break;
    case 'f':
    case 'g':
		{
			const char *fname = IoFiles[c-'f'];

			bool res = cap.open(fname);
			if(res) {
				printf("Init read from file %s - ok\n", fname);
			}
			else {
				printf("Init read from file %s - failed %s\n", fname, cvErrorStr(cvGetErrStatus()));
			}
			IsCam = false;
		}
        break;
    case 's':
    case 't':
		{
			if(IoFiles[c-'s'] == NULL) {
				Error("No file defined\n");
				break;
			}

			if(!Vwrite.open(IoFiles[c-'s'], CV_FOURCC('I', 'Y', 'U', 'V'),
					FRAMES_SECOND, LastImageSize, true)) {
				Error("Could not capture to file\n");
				break;
			}
		}
        break;
    default:
        break;
    }

    return true;
}
Example #10
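// Color-blob follower with a trackbar panel: HSV thresholding (plus a second
// range for the red hue wrap-around), erode/dilate cleanup and contour search
// locate the object; distance and angle are then turned into the speed values
// Rg and Rd, and the three views are concatenated into the output video.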
int main (int argc, char **argv) {
    
	cout << "Début\n";
    
    // Read the capture source
    ifstream sourceFile;
    string source;
    
    sourceFile.open(SOURCE_FILE);
    
    if(sourceFile.is_open()) {
		getline(sourceFile, source);
		sourceFile.close();
	}
	else { source = "0"; }
	
    VideoCapture video;
    switch (source[0]) {
        case 'v':
            video.open(source.substr(2));
            break;
        default:
            video.open(0);
            break;
    }
    
    
    // Error handling if the capture is empty
	if (! video.isOpened()) {
		cout << "Source problem\n";
		return -1;
	}

    // Create the windows
	namedWindow("trace", CV_WINDOW_NORMAL);
	namedWindow("panel", CV_WINDOW_NORMAL);
	namedWindow("transformed", CV_WINDOW_NORMAL);
	namedWindow("base", CV_WINDOW_NORMAL);
	
	resizeWindow("base", 500, 375);
	resizeWindow("transformed", 500, 375);
	resizeWindow("panel", 500, 750);
	resizeWindow("trace", 500, 375);
	
	moveWindow("base", 0, 0);
	moveWindow("transformed", 0, 375);
	moveWindow("panel", 500, 0);
	moveWindow("trace", 1000, 0);
	
	// Declare the variables used
	int blur = BLUR,
	    tracer = 1,
	    norma = 1,
	    inverseRed = 1,
	    hue = HUE_BASE,
	    saturation = SATURATION_BASE,
	    hueCustom = HUE_TOLERANCE,
	    saturationCustom = SATURATION_TOLERANCE,
	    hueInverse = HUE_BASE_INVERSE,
	    hueToleranceInverse = HUE_TOLERANCE_INVERSE,
	    saturationInverse = SATURATION_BASE_INVERSE,
	    saturationToleranceInverse = SATURATION_TOLERANCE_INVERSE,
	    compteurErreurs = 0,
        key = 0,
        videoHeight = video.get(CV_CAP_PROP_FRAME_HEIGHT),
        videoWidth = video.get(CV_CAP_PROP_FRAME_WIDTH),
	    rayon = max(videoWidth, videoHeight),
	    erodeSize = ERODE_SIZE,
	    dilateSize = DILATE_SIZE;
        
    float angle = 0.0,
    	newAngle = 0.0, 
        distance = 0.0, 
        newDistance = 0.0,
        Rg = 0.0, 
        Rd = 0.0;
	
	bool continuer = true,
	    pause = false,
	    registerVideo = OUTPUT_VIDEO,
	    registerValeurs = OUTPUT_VALEURS;
	   
	Mat frameOrigine,
	    frameCouleurs,
	    frameHSV,
	    frameDetection,
	    frameDetectionInverseRed,
	    frameTrace(Mat::zeros(videoHeight, videoWidth, CV_8UC3)), // zero-initialized so the trace window starts black
        frameContours,
        frameOutput(videoHeight, videoWidth * 3, CV_8UC3),
        element;
        
	Point center((int)(videoWidth / 2) , (int)(videoHeight / 2));
	
	vector<Mat> channels, outputFrames(3);
	
	vector<vector<Point> > contours;
	
	VideoWriter outputVideo;
	outputVideo.open(
		OUTPUT_VIDEO_FILE, 
		CV_FOURCC('M', 'J', 'P', 'G'), 
		10, 
		Size(videoWidth * 3, videoHeight), 
		true
	);
	if (! outputVideo.isOpened()) {
		cout << "Problème output video.\n";
		registerVideo = false;
	} 
	
	ofstream output;
	output.open(OUTPUT_FILE);
	if (! output.is_open()) { registerValeurs = false; }

    // Create the trackbars
	createTrackbar("Blur value", "panel", &blur, 30);
	createTrackbar("Apply histogram equalization", "panel", &norma, 1);
	
	createTrackbar("hue base", "panel", &hue, 180);
	createTrackbar("saturation base", "panel", &saturation, 255);
	createTrackbar("hue tolerance", "panel", &hueCustom, 180);
	createTrackbar("saturation tolerance", "panel", &saturationCustom, 255);
	
	createTrackbar("hue base Inverse", "panel", &hueInverse, 180);
	createTrackbar("saturation base Inverse", "panel", &saturationInverse, 255);
	createTrackbar("hue tolerance Inverse", "panel", &hueToleranceInverse, 180);
	createTrackbar("saturation tolerance Inverse", "panel", &saturationToleranceInverse, 255);
	
	createTrackbar("Erode size", "panel", &erodeSize, 30);
	createTrackbar("Dilate size", "panel", &dilateSize, 30);
	
	createTrackbar("Apply inverse detection", "panel", &inverseRed, 1);
	
	createTrackbar("Show detection", "panel", &tracer, 1);
	
	
	
	while (continuer) {
	
	    // Grab a frame
	    if (! pause) {
		    video >> frameOrigine;
		}
		frameCouleurs = frameOrigine.clone();
		
		if (frameCouleurs.empty()) {
		
		    // Error handling if the frame is empty
			cout << "Frame problem\n";
			compteurErreurs++;
			if (compteurErreurs > 5) {
	            cout << "Stopping: 6 consecutive bad frames\n";
	            continuer = false;
			}
		}
		else {
		    compteurErreurs = 0;
		    
		    // Blur
		    if (blur) {
		        GaussianBlur(frameCouleurs, frameCouleurs, Size(9, 9), blur, blur);
		    }
		    
		    // Normalize
		    if (norma) {
		    	cvtColor(frameCouleurs, frameCouleurs, CV_BGR2YCrCb); 
		    	split(frameCouleurs, channels); 
		    	equalizeHist(channels[0], channels[0]);
		    	merge(channels,frameCouleurs); 
			    cvtColor(frameCouleurs, frameCouleurs, CV_YCrCb2BGR); 
		    }
		    
		    // BGR 2 HSV
			cvtColor(frameCouleurs, frameHSV, CV_BGR2HSV);
			
			// Target color in white, everything else in black
            
			inRange(
			    frameHSV, 
		        Scalar(hue - hueCustom, saturation - saturationCustom, 0), 
		        Scalar(hue + hueCustom, saturation + saturationCustom, 255), 
                frameDetection
            );
            
            // The other red range (the red hue wraps around 180/0)
            if (inverseRed) {
                inRange(
			        frameHSV, 
		            Scalar(hueInverse - hueToleranceInverse, saturationInverse - saturationToleranceInverse, 0), 
		            Scalar(hueInverse + hueToleranceInverse, saturationInverse + saturationToleranceInverse, 255), 
                    frameDetectionInverseRed
                );
                addWeighted(frameDetection, 1, frameDetectionInverseRed, 1, 0, frameDetection, frameDetection.type());
            }
            
            // Erode
            if (erodeSize) {
		        element = getStructuringElement(
		        	MORPH_ELLIPSE, 
		        	Size(erodeSize, erodeSize)
	        	);    
		        erode(frameDetection, frameDetection, element);
	        }
	        
	        // Dilate
	        if (dilateSize) {
		        element = getStructuringElement(
		        	MORPH_ELLIPSE, 
		        	Size(dilateSize, dilateSize)
	        	);    
		        dilate(frameDetection, frameDetection, element);           
            }
            
            // Find the object
            frameContours = frameDetection.clone();
            findContours(frameContours, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, Point(0, 0)); 
            drawContours(frameDetection, contours, -1, Scalar(255, 255, 0));
            findObject(contours, center, rayon);
            
            // Compute angle, distance and speeds
            newDistance = findDistance(rayon, videoHeight);
	        newAngle = findRotation(center.x, FIELD_VIEW, videoWidth);
	        // Behaves better when detection fails
            if (abs(distance - newDistance) > MAXIMUM_DEPLACEMENT_POSSIBLE) {
            	Rg = STOPPER;
            	Rd = STOPPER;
            }
            else {
            	distance = newDistance;
            	angle = newAngle;
	        	findVitesses(distance, angle, Rg, Rd);
           	}
                
            
            if (rayon <= 0) { rayon = 1; }
            
            // Display
            if (tracer) {
            
                // "base" window
                circle(frameCouleurs, center, 2, Scalar(0, 255, 0), -1); // center
                circle(frameCouleurs, center, rayon, Scalar(0, 255, 0), 1); // perimeter
                
                // "transformed" window
                cvtColor(frameDetection, frameDetection, CV_GRAY2BGR);
                circle(frameDetection, center, 2, Scalar(0, 0, 255), -1);
                circle(frameDetection, center, rayon, Scalar(0, 0, 255), 1); 
            }
            
            // "trace" window
            circle(frameTrace, center, 2, Scalar(0, 0, 255), -1);
            circle(frameTrace, center, rayon, Scalar(0, rayon * 5, 255), 1); 
            
			imshow("base", frameCouleurs);
			imshow("transformed", frameDetection);
			imshow("trace", frameTrace);
			
			// Output
            if (! pause) {
            	if (registerVideo) {
				    outputFrames[0] = frameCouleurs;
					outputFrames[1] = frameDetection;
					outputFrames[2] = frameTrace;
		        	hconcat(outputFrames, frameOutput);
		        	outputVideo << frameOutput;
            	}
            	if (registerValeurs) {
		            output 
		                << "Rg = " << Rg << " "
		                << "Rd = " << Rd << " "
		                << "A = " << angle << "°            "
		                << "D = " << distance << "cm" << endl;
                }
            }
			
		}
		
		// Refresh
		key = waitKey(REFRESH);
		switch (key) {
		    case PAUSE_KEY :
		        pause = !pause;
	            break;
            case EXIT_KEY :
                continuer = false;
                break;
		}
		
	}

	return 0;
}
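Example #11
// Basketball tracker: fuses HSV masks, HoughCircles and sparse optical flow
// (calcOpticalFlowPyrLK), starting from a manually selected ball; a Kalman
// filter is initialized for smoothing, and candidate blobs are sieved by area
// and aspect ratio.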
int main(int argc, char** argv)
{
    if(argc >= 3)
    {
        VideoCapture inputVideo(argv[1]); // open the default camera
        if(!inputVideo.isOpened())  // check if we succeeded
            return -1; 
        
        // Initialize
        VideoWriter outputVideo;  // Open the output
        const string source      = argv[2];                                // the source file name
        const string NAME = source + ".mp4";   // Form the new name with container
        int ex = inputVideo.get(CV_CAP_PROP_FOURCC);                       // Get Codec Type- Int form
        std::cout << ex << "\n" << (int)inputVideo.get(CV_CAP_PROP_FOURCC) << "\n";
        Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),       //Acquire input size
                      (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));    
        outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, false);
        char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
        cout << "Input codec type: " << EXT << endl;

        if (!outputVideo.isOpened())
        {
            cout  << "Could not open the output video for write \n";
            return -1;
        }
                // Basketball Color
        int iLowH = 180;
        int iHighH = 16;
        
        int iLowS =  95;
        int iHighS = 200;
        
        int iLowV = 75;
        int iHighV = 140;
        
        // court Color
        int courtLowH = 0;
        int courtHighH = 20;
        
        int courtLowS = 50;
        int courtHighS = 150;
        
        int courtLowV = 160;
        int courtHighV = 255;
        
        namedWindow("Result Window", 1);
        //namedWindow("Court Window", 1);
        
        // Mat declaration
        Mat prev_frame, prev_gray, cur_frame, cur_gray;
        Mat frame_blurred, frameHSV, frameGray;
        
        // take the first frame
        inputVideo >> prev_frame;
        
        /* manual ball selection */
        MouseParams mp;
        prev_frame.copyTo( mp.ori );
        prev_frame.copyTo( mp.img );
        setMouseCallback("Result Window", BallSelectFunc, &mp );
        
        int enterkey = 0;
        while(enterkey != 32 && enterkey != 113)
        {
            enterkey = waitKey(30) & 0xFF;
            imshow("Result Window", mp.img);
        }
        Rect  lastBallBox;
        Point lastBallCenter;
        Point lastMotion;
        
        /* Kalman Filter Initialization */
        KalmanFilter KF(4, 2, 0);
        float transMatrixData[16] = {1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1};
        KF.transitionMatrix = Mat(4, 4, CV_32F, transMatrixData);
        Mat_<float> measurement(2,1);
        measurement.setTo(Scalar(0));
        
        KF.statePre.at<float>(0) = mp.pt.x;
        KF.statePre.at<float>(1) = mp.pt.y;
        KF.statePre.at<float>(2) = 0;
        KF.statePre.at<float>(3) = 0;
        setIdentity(KF.measurementMatrix);
        setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
        setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
        setIdentity(KF.errorCovPost, Scalar::all(.1));
        int pre_status_7=0;
        
        /* start tracking */
        setMouseCallback("Result Window", CallBackFunc, &frameHSV);
        
        for(int frame_num=1; frame_num < inputVideo.get(CAP_PROP_FRAME_COUNT); ++frame_num)
        {
            int cur_status_7=pre_status_7;
            
            inputVideo >> cur_frame; // get a new frame
            // Blur & convert frame to HSV color space
            cv::GaussianBlur(prev_frame, frame_blurred, cv::Size(5, 5), 3.0, 3.0);
            cvtColor(frame_blurred, frameHSV, COLOR_BGR2HSV);
            
            // gray scale current frame
            cvtColor(prev_frame, prev_gray, CV_BGR2GRAY);
            cvtColor(cur_frame, cur_gray, CV_BGR2GRAY);
            
            /*
             * STAGE 1: mask generation
             * creating masks for balls and courts.
             */
            Mat mask, mask1, mask2, court_mask;
            inRange(frameHSV, Scalar(2, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), mask1);
            inRange(frameHSV, Scalar(iLowH, iLowS, iLowV), Scalar(180, iHighS, iHighV), mask2);
            inRange(frameHSV, Scalar(courtLowH, courtLowS, courtLowV), Scalar(courtHighH, courtHighS, courtHighV), court_mask);
            
            mask = mask1 + mask2;
            
            // morphological opening (remove small objects from the foreground)
            erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            // morphological closing (fill small holes in the foreground)
            dilate(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            erode(mask, mask, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            /*
             * Method:  HoughCircles
             * creating circles and radius.
             */
            // Basketball Color for Hough circle
            
            int iLowH = 180;
            int iHighH = 16;
            
            int iLowS =  95;
            int iHighS = 200;
            
            int iLowV = 75;
            int iHighV = 140;
            
            Mat mask1_circle, mask2_circle, mask_circle, frameHSV_circle, frameFiltered,frameGray2;
            cvtColor(frame_blurred, frameHSV_circle, COLOR_BGR2HSV);
            inRange(frameHSV_circle, Scalar(0, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), mask1_circle);
            inRange(frameHSV_circle, Scalar(iLowH, iLowS, iLowV),Scalar(180, iHighS, iHighV), mask2_circle);
            mask_circle = mask1_circle + mask2_circle;
            erode(mask_circle, mask_circle, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            dilate(mask_circle, mask_circle, getStructuringElement(MORPH_ELLIPSE, Size(7, 7)) );
            
            prev_frame.copyTo( frameFiltered, mask_circle );
            cv::cvtColor( frameFiltered, frameGray2, CV_BGR2GRAY );
            vector<cv::Vec3f> circles;
            cv::GaussianBlur(frameGray2, frameGray2, cv::Size(5, 5), 3.0, 3.0);
            HoughCircles( frameGray2, circles, CV_HOUGH_GRADIENT, 1, frameGray2.rows/8, 120, 18, 5,300);
            
            /*
             * STAGE 2: contour generation
             * creating contours with masks.
             */
            vector< vector<cv::Point> > contours_ball;
            vector< vector<cv::Point> > contours_court;
            cv::findContours(mask, contours_ball, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
            
            Mat result;
            
            prev_frame.copyTo( result );
            
            /*
             // court mask refinement: eliminate small blocks
             Mat buffer;
             court_mask.copyTo( buffer );
             cv::findContours(buffer, contours_court, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
             
             for (size_t i = 0; i < contours_court.size(); i++)
             {
             double tmp_area = contourArea( contours_court[i] );
             if(tmp_area < 900.0)
             drawContours(court_mask, contours_court, i, 0, CV_FILLED);
             }
             bitwise_not(court_mask, court_mask);
             court_mask.copyTo( buffer );
             cv::findContours(buffer, contours_court, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
             for (size_t i = 0; i < contours_court.size(); i++)
             {
             double tmp_area = contourArea( contours_court[i] );
             if(tmp_area < 900.0)
             drawContours(court_mask, contours_court, i, 0, CV_FILLED);
             }
             bitwise_not(court_mask, court_mask);
             
             Mat canny_mask;
             Canny(court_mask, canny_mask, 50, 150, 3);
             vector<Vec4i> lines;
             HoughLinesP(canny_mask, lines, 1, CV_PI/180, 80, 30, 10);
             
             Point l_top( mask.cols/2, mask.rows );
             Point l_bot( mask.cols/2, mask.rows );
             
             for( size_t i = 0; i < lines.size(); i++ )
             {
             Point p1 = Point(lines[i][0], lines[i][1]);
             Point p2 = Point(lines[i][2], lines[i][3]);
             
             if(p1.y < l_top.y)
             {
             l_top = p1;
             l_bot = p2;
             }
             if(p2.y < l_top.y)
             {
             l_top = p2;
             l_bot = p1;
             }
             }
             // stretch the line
             Point v_diff = l_top - l_bot;
             Point p_left, p_right;
             
             
             int left_t  = l_top.x / v_diff.x;
             int right_t = (mask.cols - l_top.x) / v_diff.x;
             
             p_left = l_top - v_diff * left_t;
             p_right = l_top + v_diff * right_t;
             
             line( court_mask, p_left, p_right, Scalar(128), 2, 8 );
             imshow("Court Window", court_mask);
             */
            
            // sieves
            vector< vector<cv::Point> > balls;
            vector<cv::Point2f> prev_ball_centers;
            vector<cv::Rect> ballsBox;
            Point best_candidate;
            for (size_t i = 0; i < contours_ball.size(); i++)
            {
                drawContours(result, contours_ball, i, CV_RGB(255,0,0), 1);  // fill the area
                
                cv::Rect bBox;
                bBox = cv::boundingRect(contours_ball[i]);
                Point center;
                center.x = bBox.x + bBox.width / 2;
                center.y = bBox.y + bBox.height / 2;
                
                // meet prediction!
                if( mp.pt.x > bBox.x && mp.pt.x < bBox.x + bBox.width &&
                   mp.pt.y > bBox.y && mp.pt.y < bBox.y + bBox.height)
                {
                    // initialization of ball position at first frame
                    if( frame_num == 1 || ( bBox.area() <= lastBallBox.area() * 1.5 && bBox.area() >= lastBallBox.area() * 0.5) )
                    {
                        lastBallBox = bBox;
                        lastBallCenter = center;
                        
                        balls.push_back(contours_ball[i]);
                        prev_ball_centers.push_back(center);
                        ballsBox.push_back(bBox);
                        best_candidate = center;
                    }
                    else
                    {
                        cout << "area changed!" << endl;
                        // if the block containing ball becomes too large,
                        // we use last center + motion as predicted center
                        balls.push_back(contours_ball[i]);
                        prev_ball_centers.push_back( lastBallCenter+lastMotion );
                        ballsBox.push_back(bBox);
                        best_candidate = lastBallCenter + lastMotion;
                    }
                }
                else
                {
                    // ball size sieve
                    
                    if(  bBox.area() > 1600 )
                        continue;
                    
                    // ratio sieve
                    //                     float ratio = (float) bBox.width / (float) bBox.height;
                    //                     if( ratio < 1.0/2.0 || ratio > 2.0 )
                    //                     continue;
                    
                    // ball center sieve: since we've done dilate and erode, not necessary to do.
                    /*
                     uchar center_v = mask.at<uchar>( center );*
                     if(center_v != 1)
                     continue;
                     */
                    
                    // ball-on-court sieve: not useful in basketball =(
                    //if(court_mask.at<uchar>(center) != 255)
                    //  continue;
                    
                    balls.push_back(contours_ball[i]);
                    prev_ball_centers.push_back(center);
                    ballsBox.push_back(bBox);
                }
            }
            
            
            // store the center of the hough circle
            vector<cv::Point2f> prev_ball_centers_circle;
            for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
            {
                Point center_circle(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                int radius_circle = cvRound(circles[circle_i][2]);
                prev_ball_centers_circle.push_back(center_circle);
            }
            // Kalman Filter Prediction
            //Mat prediction = KF.predict();
            //Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
            // Kalman Filter Update
            //Mat estimated = KF.correct( best_candidate );
            
            //OpticalFlow for HSV
            vector<Point2f> cur_ball_centers;
            vector<uchar> featuresFound;
            Mat err;
            TermCriteria termcrit(TermCriteria::COUNT|TermCriteria::EPS, 20, 0.03);
            Size winSize(31, 31);
            if( prev_ball_centers.size() > 0 )
                calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers, cur_ball_centers, featuresFound, err, winSize, 3, termcrit, 0, 0.001);
            
            //OpticalFlow for circle
            vector<Point2f> cur_ball_centers_circle;
            vector<uchar> featuresFound_circle;
            Mat err2;
            if( prev_ball_centers_circle.size() > 0 )
                calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers_circle, cur_ball_centers_circle, featuresFound_circle, err2, winSize, 3, termcrit, 0, 0.001);
            
            //plot MP
            circle(result, mp.pt, 2, CV_RGB(255,255,255), 5);
            cout<<"frame_num :"<<frame_num<<endl;
            cout<<"lastMotion"<<lastMotion<<endl;
            bool ball_found = false;
            
            for (size_t i = 0; i < balls.size(); i++)
            {
                cv::Point center;
                center.x = ballsBox[i].x + (ballsBox[i].width / 2);
                center.y = ballsBox[i].y + (ballsBox[i].height/2);
                // consider hough circle
                int circle_in_HSV=0;
                int in=0;
                for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                {
                    
                    
                    Point center2(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                    int radius = cvRound(circles[circle_i][2]);
                    double dis_center =  sqrt(pow(center2.x-center.x,2)+pow(center2.y-center.y,2));
                    
                    if( frame_num >2 && radius<40 && dis_center<radius+3 && mp.pt.x > ballsBox[i].x && mp.pt.x < ballsBox[i].x + ballsBox[i].width && mp.pt.y > ballsBox[i].y && mp.pt.y < ballsBox[i].y + ballsBox[i].height){
                        circle_in_HSV=1;
                        Point motion = cur_ball_centers_circle[circle_i] - prev_ball_centers_circle[circle_i];
                        mp.pt = Point2f(cur_ball_centers_circle[circle_i].x, cur_ball_centers_circle[circle_i].y);
                        lastMotion = motion;
                        cout<<mp.pt<<endl;
                        cout<<"status 1"<<endl;
                        cout<<motion<<endl;
                        ball_found = true;
                        in=1;
                        cout<<in<<endl;
                        cv::circle( result, center2, radius, Scalar(0,255,0), 2 );
                    }
                    
                    //                    if(radius<40){
                    //                        stringstream sstr;
                    //                        sstr << "(" << center2.x << "," << center2.y << ")";
                    ////                        cv::putText(result, sstr.str(),
                    ////                                    cv::Point(center2.x + 3, center2.y - 3),
                    ////                                    cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
                    //                        cv::circle( result, center2, radius, Scalar(12,12,255), 2 );}
                }
                
                // see if any candidate box contains our ball
                if( circle_in_HSV==0 && mp.pt.x > ballsBox[i].x && mp.pt.x < ballsBox[i].x + ballsBox[i].width && mp.pt.y > ballsBox[i].y && mp.pt.y < ballsBox[i].y + ballsBox[i].height)
                {
                    cv::rectangle(result, ballsBox[i], CV_RGB(0,255,0), 2);
                    Point motion = cur_ball_centers[i] - prev_ball_centers[i];
                    // update points and lastMotion
                    
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    if( ballsBox[i].area() < 1000 && ratio>0.7 && ratio<1.35 && ballsBox[i].area() > 200){
                        mp.pt = Point2f(center.x, center.y);
                        cout<<"status 2"<<endl;
                        cout<<"AREA:"<<ballsBox[i].area()<<endl;
                    }else{
                        mp.pt = Point2f(mp.pt.x+motion.x, mp.pt.y+motion.y);
                        cout<<"status 3"<<endl;
                    }
                    // TODO replace with predicted points of kalman filter here.
                    lastMotion = motion;
                    ball_found = true;
                }
                
                // draw optical flow
                if(!featuresFound[i])
                    continue;
                
                cv::Point2f prev_center = prev_ball_centers[i];
                cv::Point2f curr_center = cur_ball_centers[i];
                cv::line( result, prev_center, curr_center, CV_RGB(255,255,0), 2);
                
            }
            
            // if ball is not found, search for the closest ball candidate within a distance.
            if(!ball_found)
            {
                int search_distance_threshold = 35*35;
                int closest_dist      = 2000;
                //                int closest_dist2      = 2000;
                int closest_area_diff = 10000;
                int best_i = 0;
                
                for (size_t i = 0; i < balls.size(); i++)
                {
                    int diff_x = prev_ball_centers[i].x - mp.pt.x;
                    int diff_y = prev_ball_centers[i].y - mp.pt.y;
                    int area_threshold_high = 100*100;
                    int area_threshold_low = 15*15;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    int area_diff = abs(ballsBox[i].area()-lastBallBox.area());
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    //                    if(distance<closest_dist2){
                    //                        closest_dist2=distance;
                    //                        best_i = i;}
                    // if distance is small
                    if( distance < search_distance_threshold &&
                       distance < closest_dist && ratio>0.7 && ratio<1.45 && ballsBox[i].area()<area_threshold_high && ballsBox[i].area()>area_threshold_low)
                    {
                        closest_dist      = distance;
                        closest_area_diff =  area_diff;
                        best_i = i;
                        ball_found = true;
                    }
                }
                //                cout<<"ballsBox[i].area()"<<ballsBox[best_i].area()<<endl;
                //                cout<<"Ratio"<<(float) ballsBox[best_i].width / (float) ballsBox[best_i].height<<endl;
                int best_radius;
                if(ball_found)
                {
                    // reset mp.pt
                    cout<<"here! yello"<<endl;
                    
                    int search_distance_threshold = 80*80;
                    int closest_dist = 2000;
                    int best_circle_i = 0;
                    bool circle_found = false;
                    for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                    {
                        int radius = cvRound(circles[circle_i][2]);
                        int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                        int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                        int distance  = diff_x * diff_x + diff_y * diff_y;
                        if( distance < search_distance_threshold && distance < closest_dist && radius>8 && radius<13)
                        {
                            closest_dist      = distance;
                            best_circle_i = circle_i;
                            circle_found = true;
                            cout<<"radius"<<radius<<endl;
                            best_radius = radius;
                        }
                    }
                    if(circle_found){
                        cv::circle( result, cur_ball_centers_circle[best_circle_i], best_radius, CV_RGB(255,255,0), 2 );
                        mp.pt = Point2f(cur_ball_centers_circle[best_circle_i].x, cur_ball_centers_circle[best_circle_i].y);
                        cout<<"status 4"<<endl;
                    } else{
                        cv::rectangle(result, ballsBox[best_i], CV_RGB(255,255,0), 2);
                        Point motion = cur_ball_centers[best_i] - prev_ball_centers[best_i];
                        mp.pt = Point2f(cur_ball_centers[best_i].x, cur_ball_centers[best_i].y);
                        lastMotion = motion;
                        cout<<"status 5"<<endl;
                    }
                    
                }
                else
                {
                    // if ball still not found... stay at the same direction
                    circle(result, mp.pt, 5, CV_RGB(255,255,255), 2);
                    int search_distance_threshold, closest_dist, best_i, radius_threshold_low, radius_threshold_high;
                    bool ball_found;    // deliberately shadows the outer ball_found within this fallback block
                    if(cur_status_7>1){
                        search_distance_threshold = 200*200;
                        closest_dist      = 55000;
                        best_i = 0;
                        radius_threshold_low=4;
                        radius_threshold_high=16;
                        ball_found = false;}
                    else{
                        search_distance_threshold = 80*80;
                        closest_dist      = 6000;
                        best_i = 0;
                        radius_threshold_low=7;
                        radius_threshold_high=13;
                        ball_found = false;
                    }
                    int best_radius;
                    for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                    {
                        int radius = cvRound(circles[circle_i][2]);
                        int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                        int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                        int distance  = diff_x * diff_x + diff_y * diff_y;
                        if( distance < search_distance_threshold && distance < closest_dist && radius>radius_threshold_low && radius<radius_threshold_high)
                        {
                            closest_dist      = distance;
                            best_i = circle_i;
                            ball_found = true;
                            best_radius =radius;
                            cout<<"radius"<<radius<<endl;
                            cout<<mp.pt<<endl;
                        }
                    }
                    if(ball_found){
                        cv::circle( result, cur_ball_centers_circle[best_i], best_radius, CV_RGB(255,255,0), 2 );
                        Point motion = cur_ball_centers_circle[best_i] - prev_ball_centers_circle[best_i];
                        mp.pt = Point2f(cur_ball_centers_circle[best_i].x, cur_ball_centers_circle[best_i].y);
                        lastMotion = motion;
                        cout<<mp.pt<<endl;
                        cout<<motion<<endl;
                        cout<<"status 6"<<endl;
                    }else{
                        //                        mp.pt = lastBallCenter + lastMotion;
                        cout<<"status 7"<<endl;
                        cout<<"lastBallCenter"<<lastBallCenter<<endl;
                    }
                    //                          mp.pt = Point2f(mp.pt.x+lastMotion.x, mp.pt.y+lastMotion.y);
                    pre_status_7+=1;
                }
            }
            
            if(lastMotion.x*lastMotion.x+lastMotion.y*lastMotion.y>1200){
                cout<<"HIGH SPEED"<<endl;
                cout<<"HIGH SPEED"<<endl;
                cout<<"HIGH SPEED"<<endl;
                cout<<"MP before"<<mp.pt<<endl;
                int search_distance_threshold = 200*200;
                int closest_dist = 55000;
                int best_circle_i = 0;
                int best_i=0;
                bool ball_found = false;
                for (size_t i = 0; i < balls.size(); i++)
                {
                    int diff_x = prev_ball_centers[i].x - mp.pt.x;
                    int diff_y = prev_ball_centers[i].y - mp.pt.y;
                    int area_threshold_high = 100*100;
                    int area_threshold_low = 10*10;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    int area_diff = abs(ballsBox[i].area()-lastBallBox.area());
                    float ratio = (float) ballsBox[i].width / (float) ballsBox[i].height;
                    //                    if(distance<closest_dist2){
                    //                        closest_dist2=distance;
                    //                        best_i = i;}
                    // if distance is small
                    if( distance < search_distance_threshold &&
                       distance < closest_dist && ratio>0.7 && ratio<1.45 && ballsBox[i].area()<area_threshold_high && ballsBox[i].area()>area_threshold_low)
                    {
                        closest_dist    = distance;
                        best_i = i;
                        ball_found = true;
                    }
                }
                if(ball_found)
                {
                    cv::rectangle(result, ballsBox[best_i], CV_RGB(255,255,0), 2);
                    Point motion = cur_ball_centers[best_i] - prev_ball_centers[best_i];
                    mp.pt = Point2f(cur_ball_centers[best_i].x, cur_ball_centers[best_i].y);
                    lastMotion = motion;
                    cout<<"ball"<<endl;
                }
                
                circle(result, mp.pt, 5, CV_RGB(255,255,255), 2);
                int radius_threshold_low, radius_threshold_high;
                int best_radius;
                search_distance_threshold = 200*200;
                closest_dist      = 55000;
                best_i = 0;
                radius_threshold_low=5;
                radius_threshold_high=13;
                ball_found = false;
                
                for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
                {
                    int radius = cvRound(circles[circle_i][2]);
                    int diff_x = prev_ball_centers_circle[circle_i].x - mp.pt.x;
                    int diff_y = prev_ball_centers_circle[circle_i].y - mp.pt.y;
                    int distance  = diff_x * diff_x + diff_y * diff_y;
                    if( distance < search_distance_threshold && distance < closest_dist && radius>radius_threshold_low && radius<radius_threshold_high)
                    {
                        closest_dist      = distance;
                        best_i = circle_i;
                        ball_found = true;
                        best_radius = radius;
                    }
                }
                if(ball_found)
                {
                    //                    cv::circle( result, cur_ball_centers_circle[best_i], best_radius, Scalar(255,255,0), 2 );
                    Point motion = cur_ball_centers_circle[best_i] - prev_ball_centers_circle[best_i];
                    mp.pt = Point2f(cur_ball_centers_circle[best_i].x, cur_ball_centers_circle[best_i].y);
                    lastMotion = motion;
                    cout<<"circle"<<endl;
                }
                cout<<"MP after"<<mp.pt<<endl;
                
            }
            
            for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
            {
                
                
                Point center2(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                double dis_center =  sqrt(pow(center2.x-mp.pt.x,2)+pow(center2.y-mp.pt.y,2));
                int radius = cvRound(circles[circle_i][2]);
                if(dis_center<200)
                    continue;
                cv::circle( result, center2, radius, Scalar(12,12,255), 2 );
                
            }
            
            // clamp the estimate inside the 1280x720 frame
            if(mp.pt.x<1)
                mp.pt.x=1;
            if(mp.pt.x>1279)
                mp.pt.x=1279;
            if(mp.pt.y<1)
                mp.pt.y=1;
            if(mp.pt.y>719)
                mp.pt.y=719;
            if(pre_status_7==cur_status_7)
                pre_status_7=0;
            imshow("Result Window", result);
            
            /* UPDATE FRAME */
            cur_frame.copyTo( prev_frame );
            
            /* KEY INPUTS */
            int keynum = waitKey(30) & 0xFF;
            if(keynum == 113)      // press q
                break;
            else if(keynum == 32)  // press space
            {
                keynum = 0;
                while(keynum != 32 && keynum != 113)
                    keynum = waitKey(30) & 0xFF;
                if(keynum == 113)
                    break;
            }
        }
        inputVideo.release();
        outputVideo.release();
    }
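The selection passes above (statuses 4 through 6 and the high-speed rescue) all repeat the same pattern: square the search radius, loop over candidate centers, and keep the nearest one that passes the radius, area, and aspect-ratio gates. A minimal consolidated sketch of that nearest-candidate search, with a hypothetical helper name that is not part of the original code:

#include <vector>
#include <opencv2/core/core.hpp>

// index of the candidate center nearest to `query` within `maxDist` pixels,
// or -1 if none qualifies; distances are compared squared, as in the loops above
static int nearestCandidate(const std::vector<cv::Point2f>& centers,
                            const cv::Point2f& query, float maxDist)
{
    int best = -1;
    float bestD2 = maxDist * maxDist;   // squared search radius
    for (size_t i = 0; i < centers.size(); ++i) {
        float dx = centers[i].x - query.x;
        float dy = centers[i].y - query.y;
        float d2 = dx * dx + dy * dy;
        if (d2 < bestD2) { bestD2 = d2; best = (int)i; }
    }
    return best;
}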
int main( int argc, char **argv ) {
  string fn, output_fn;
  int box_size, horizon_crop;
  int start_frame;
  float scale_factor;
  int end_frame;
  int ransac_max_iters;
  float ransac_good_ratio;
  try
  {
    po::options_description desc("Options");
    desc.add_options()
      ("help,h", "Print help messages")
      ("boxsize,b", po::value<int>(&box_size)->default_value(20), "The size of the box that you search for the best point to track in")
      ("hcrop,c", po::value<int>(&horizon_crop)->default_value(30), "Horizontal Border Crop, crops the border to reduce the black borders from stabilization being too noticeable.")
      ("manualframe,m", po::value<int>(&start_frame)->default_value(0), "Frame to do manual capturing on.")
      ("endframe,e", po::value<int>(&end_frame)->default_value(0), "Frame to stop stabilization at.")
      ("scalefactor,s", po::value<float>(&scale_factor)->default_value(0.25), "Scaling Factor for manual marking.")
      ("ransac_max_iters,i", po::value<int>(&ransac_max_iters)->default_value(500), "Maximum number of iterations for RANSAC.")
      ("ransac_good_ratio,g", po::value<float>(&ransac_good_ratio)->default_value(0.9), "Inlier Ratio used for RANSAC.")
      //A higher inlier ratio will force model to only estimate the affine transform using that percentage of inlier points.
      ("footage,f", po::value<string>(&fn)->required(), "footage file")
      ("output,o", po::value<string>(&output_fn)->default_value("output.avi"), "output file");

    po::positional_options_description positionalOptions; 
    positionalOptions.add("footage", 1);

    po::variables_map vm;

    // Parse command line arguments
    try
    {
      po::store(po::command_line_parser(argc, argv).options(desc).positional(positionalOptions).run(), vm);

      if ( vm.count( "help" ) )
      {
        cout << "This is the stabilization software for the gopro camera. " << endl << endl; 
        cout << "Usage: " << argv[0] << " [options] <footage>" << endl  << endl << desc << endl;

        return 0;
      }
      
      po::notify(vm);
    }
    catch ( po::error& e )
    {
      cerr << "ERROR: " << e.what() << endl << endl;
      cerr << desc << endl;
      return 1;
    }
    if (end_frame > 0 && end_frame < start_frame)
    {
      throw invalid_argument( "selected end frame is before start frame");
    }
    VideoCapture capturefirst(fn);
    VideoWriter writer;
    writer.open(output_fn, CV_FOURCC('m','p','4','v'), capturefirst.get(CV_CAP_PROP_FPS), Size((int) capturefirst.get(CV_CAP_PROP_FRAME_WIDTH), (int) capturefirst.get(CV_CAP_PROP_FRAME_HEIGHT)), true);
    Mat curr, curr_grey;
    Mat first, first_grey, first_grey_disp;
    int max_frames = capturefirst.get(CV_CAP_PROP_FRAME_COUNT);
    printf("Number of frames in video: %d\n",max_frames);
    if ( start_frame > max_frames )
    {
      throw invalid_argument( "start_frame larger than max frames" );
    }
    if (end_frame == 0)
    {
      end_frame = max_frames;
    }
    capturefirst.set(CV_CAP_PROP_POS_FRAMES, start_frame);
    do
    {
      capturefirst >> first;
    }
    while ( first.data == NULL );
    cvtColor(first, first_grey, COLOR_BGR2GRAY);
    resize(first_grey, first_grey_disp, Size(), scale_factor, scale_factor);
    vector <Point2f> first_corners, first_corners2;
    struct UserData ud(first_grey_disp, &first_corners, box_size, scale_factor);

    namedWindow("first", CV_WINDOW_AUTOSIZE);
    setMouseCallback("first", select_features_callback, &ud);
    imshow("first", first_grey_disp);
    waitKey(0);
    cout << first_corners.size() << " corners detected." << endl;
    destroyAllWindows();
    writer << first;

    Mat last_T;
    capturefirst.release();

    VideoCapture capture(fn);
    cout << "Analyzing" << endl;
    int k = 0;
    while ( k < max_frames - 1 )
    {
      capture >> curr;
      
      if ( curr.data == NULL )
      {
        break;
      }

      cvtColor(curr, curr_grey, COLOR_BGR2GRAY);
      
      vector <Point2f> curr_corners, curr_corners2;
      vector <uchar> status;
      vector <float> err;

      calcOpticalFlowPyrLK(first_grey, curr_grey, first_corners, curr_corners, status, err);
      
      // weed out bad matches
      first_corners2.clear();
      for ( int i = 0; i < status.size(); ++i )
      {
        if ( status[i] )
        {
          first_corners2.push_back(first_corners[i]);
          curr_corners2.push_back(curr_corners[i]);
        }
      }

      //Mat T = estimateRigidTransform(curr_corners2, first_corners2, true);
      //Mat T = findHomography(curr_corners2, first_corners2, CV_RANSAC);
      Mat T = estimateRigidTransformRansac(curr_corners2, first_corners2, true, ransac_max_iters, ransac_good_ratio);
      if ( T.data == NULL )
      {
        last_T.copyTo(T);
      }

      T.copyTo(last_T);

      Mat currT;
      warpAffine( curr, currT, T, curr.size() );
      //warpPerspective(curr, currT, T, curr.size());

      int vert_border = horizon_crop * first.rows / first.cols;
      currT = currT( Range(vert_border, currT.rows-vert_border), Range(horizon_crop, currT.cols-horizon_crop) );
      resize(currT, currT, curr.size());
      writer << currT;

      disp_progress((float)k/(max_frames-1), 50);
      k++;
    }
    cout << endl;
    capture.release();

  }
  catch ( exception& e )
  {
    cerr << "Unhandled Exception reached the top of main: " << e.what() << ", application will now exit." << endl;
    return 2;
  }

  return 0;
} // main
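estimateRigidTransformRansac() above is project-specific, not stock OpenCV. A hypothetical sketch of what such a wrapper might do, assuming it repeatedly fits cv::estimateRigidTransform to small random samples and keeps the model with the most inliers; the real function's sampling and inlier test may differ:

#include <cstdlib>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/video/tracking.hpp>

static cv::Mat estimateRigidTransformRansacSketch(
        const std::vector<cv::Point2f>& src,
        const std::vector<cv::Point2f>& dst,
        bool fullAffine, int maxIters, float goodRatio)
{
    cv::Mat best;
    size_t bestInliers = 0;
    if (src.size() < 3) return best;
    for (int it = 0; it < maxIters; ++it) {
        // sample 3 random correspondences and fit a candidate transform
        std::vector<cv::Point2f> s(3), d(3);
        for (int j = 0; j < 3; ++j) {
            int idx = std::rand() % (int)src.size();
            s[j] = src[idx]; d[j] = dst[idx];
        }
        cv::Mat T = cv::estimateRigidTransform(s, d, fullAffine);
        if (T.data == NULL) continue;           // degenerate sample
        // count inliers: points the candidate maps to within 3 px of dst
        size_t inliers = 0;
        for (size_t i = 0; i < src.size(); ++i) {
            double px = T.at<double>(0,0)*src[i].x + T.at<double>(0,1)*src[i].y + T.at<double>(0,2);
            double py = T.at<double>(1,0)*src[i].x + T.at<double>(1,1)*src[i].y + T.at<double>(1,2);
            double dx = px - dst[i].x, dy = py - dst[i].y;
            if (dx*dx + dy*dy < 9.0) ++inliers;
        }
        if (inliers > bestInliers) { bestInliers = inliers; best = T; }
        if (bestInliers >= goodRatio * src.size()) break;  // good enough, stop early
    }
    return best;
}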
void visionNode::run(){
	//run initial calibration. If that fails, this node will shut down.
	if(!calibrate()) ros::shutdown();
	
	VideoWriter outputVideo;
	Size S = cv::Size(cam->get_img_width(),cam->get_img_height());
	outputVideo.open("/home/lcv/output.avi" , CV_FOURCC('M','P','2','V'), 30, S, true);

	//main loop
	while(ros::ok()){

		//if calibration was manualy invoked by call on the service
		if(invokeCalibration) {
			invokeCalibration = false;
			calibrate();
		}

		//grab frame from camera
		cam->get_frame(&camFrame);

		//correct the lens distortion
		rectifier->rectify(camFrame, rectifiedCamFrame);

		//create a duplicate grayscale frame
		cv::Mat gray;
		cv::cvtColor(rectifiedCamFrame, gray, CV_BGR2GRAY);

		//draw the calibration points
		for(point2f::point2fvector::iterator it=markers.begin(); it!=markers.end(); ++it)
			cv::circle(rectifiedCamFrame, cv::Point(cv::saturate_cast<int>(it->x), cv::saturate_cast<int>(it->y)), 1, cv::Scalar(0, 0, 255), 2);

		//detect crates
		std::vector<Crate> crates;
		qrDetector->detectCrates(gray, crates);

		//transform crate coordinates
		for(std::vector<Crate>::iterator it=crates.begin(); it!=crates.end(); ++it)
		{
			it->draw(rectifiedCamFrame);

			std::vector<cv::Point2f> points;
			for(int n = 0; n <3; n++){
				point2f result = cordTransformer->to_rc(point2f(it->getPoints()[n].x, it->getPoints()[n].y));
				points.push_back(cv::Point2f(result.x, result.y));
			}
			it->setPoints(points);
		}

		//inform the crate tracker about the seen crates
		std::vector<CrateEvent> events = crateTracker->update(crates);

		//publish events
		for(std::vector<CrateEvent>::iterator it = events.begin(); it != events.end(); ++it)
		{
			vision::CrateEventMsg msg;
			msg.event = it->type;
			msg.crate.name = it->name;
			msg.crate.x = it->x;
			msg.crate.y = it->y;
			msg.crate.angle = it->angle;

			ROS_INFO(it->toString().c_str());
			crateEventPublisher.publish(msg);
		}

		//update GUI
		outputVideo.write(rectifiedCamFrame);
		imshow("image",rectifiedCamFrame);
		waitKey(1000/30);

		//let ROS do it's magical things
		ros::spinOnce();
	}
}
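The main loop above polls invokeCalibration, which is presumably flipped by the node's calibration service handler (not shown). A hypothetical sketch of such a handler, assuming a std_srvs/Empty service; the actual service type and method name in this project are unknown:

#include <std_srvs/Empty.h>

// assumed handler name and service type, not the project's actual API:
// flags a recalibration that the loop above picks up on its next pass
bool visionNode::invokeCalibrationService(std_srvs::Empty::Request&,
                                          std_srvs::Empty::Response&)
{
	invokeCalibration = true;	// polled at the top of the while(ros::ok()) loop
	return true;
}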
Exemple #14
0
int main(int argc, char **argv)
{
	if (argc < 2) {
		cout << "./VideoStab [video.avi]" << endl;
		return 0;
	}
	// For further analysis
	ofstream out_transform("prev_to_cur_transformation.txt");
	ofstream out_trajectory("trajectory.txt");
	ofstream out_smoothed_trajectory("smoothed_trajectory.txt");
	ofstream out_new_transform("new_prev_to_cur_transformation.txt");

	VideoCapture cap(argv[1]);
	assert(cap.isOpened());

	Mat cur, cur_grey;
	Mat prev, prev_grey;

	cap >> prev; // get the first frame
	cvtColor(prev, prev_grey, COLOR_BGR2GRAY);

	// Step 1 - Get previous to current frame transformation (dx, dy, da) for all frames
	vector <TransformParam> prev_to_cur_transform; // previous to current
	// Accumulated frame to frame transform
	double a = 0;
	double x = 0;
	double y = 0;
	// Step 2 - Accumulate the transformations to get the image trajectory
	vector <Trajectory> trajectory; // trajectory at all frames
	//
	// Step 3 - Smooth out the trajectory using an averaging window
	vector <Trajectory> smoothed_trajectory; // trajectory at all frames
	Trajectory X;//posteriori state estimate
	Trajectory	X_;//priori estimate
	Trajectory P;// posteriori estimate error covariance
	Trajectory P_;// priori estimate error covariance
	Trajectory K;//gain
	Trajectory	z;//actual measurement
	double pstd = 4e-3;//can be changed
	double cstd = 0.25;//can be changed
	Trajectory Q(pstd, pstd, pstd);// process noise covariance
	Trajectory R(cstd, cstd, cstd);// measurement noise covariance 
	// Step 4 - Generate new set of previous to current transform, such that the trajectory ends up being the same as the smoothed trajectory
	vector <TransformParam> new_prev_to_cur_transform;
	//
	// Step 5 - Apply the new transformation to the video
	//cap.set(CV_CAP_PROP_POS_FRAMES, 0);
	Mat T(2, 3, CV_64F);

	int vert_border = HORIZONTAL_BORDER_CROP * prev.rows / prev.cols; // get the aspect ratio correct
	VideoWriter outputVideo;
	outputVideo.open("compare.avi", CV_FOURCC('X', 'V', 'I', 'D'), 24, cvSize(cur.rows, cur.cols * 2 + 10), true);
	//
	int k = 1;
	int max_frames = cap.get(CV_CAP_PROP_FRAME_COUNT);
	Mat last_T;
	Mat prev_grey_, cur_grey_;

	while (true) {

		cap >> cur;
		if (cur.data == NULL) {
			break;
		}

		cvtColor(cur, cur_grey, COLOR_BGR2GRAY);

		// vector from prev to cur
		vector <Point2f> prev_corner, cur_corner;
		vector <Point2f> prev_corner2, cur_corner2;
		vector <uchar> status;
		vector <float> err;

		goodFeaturesToTrack(prev_grey, prev_corner, 200, 0.01, 30);
		calcOpticalFlowPyrLK(prev_grey, cur_grey, prev_corner, cur_corner, status, err);

		// weed out bad matches
		for (size_t i = 0; i < status.size(); i++) {
			if (status[i]) {
				prev_corner2.push_back(prev_corner[i]);
				cur_corner2.push_back(cur_corner[i]);
			}
		}

		// translation + rotation only
		Mat T = estimateRigidTransform(prev_corner2, cur_corner2, false); // false = rigid transform, no scaling/shearing

		// in rare cases no transform is found. We'll just use the last known good transform.
		if (T.data == NULL) {
			last_T.copyTo(T);
		}

		T.copyTo(last_T);

		// decompose T
		double dx = T.at<double>(0, 2);
		double dy = T.at<double>(1, 2);
		double da = atan2(T.at<double>(1, 0), T.at<double>(0, 0));
		//
		//prev_to_cur_transform.push_back(TransformParam(dx, dy, da));

		out_transform << k << " " << dx << " " << dy << " " << da << endl;
		//
		// Accumulated frame to frame transform
		x += dx;
		y += dy;
		a += da;
		//trajectory.push_back(Trajectory(x,y,a));
		//
		out_trajectory << k << " " << x << " " << y << " " << a << endl;
		//
		z = Trajectory(x, y, a);
		//
		if (k == 1){
			// initial guesses
			X = Trajectory(0, 0, 0); //Initial estimate,  set 0
			P = Trajectory(1, 1, 1); //set error variance,set 1
		}
		else
		{
			//time update (prediction)
			X_ = X; //X_(k) = X(k-1);
			P_ = P + Q; //P_(k) = P(k-1)+Q;
			// measurement update (correction)
			K = P_ / (P_ + R); //gain;K(k) = P_(k)/( P_(k)+R );
			X = X_ + K*(z - X_); //z-X_ is residual,X(k) = X_(k)+K(k)*(z(k)-X_(k)); 
			P = (Trajectory(1, 1, 1) - K)*P_; //P(k) = (1-K(k))*P_(k);
		}
		//smoothed_trajectory.push_back(X);
		out_smoothed_trajectory << k << " " << X.x << " " << X.y << " " << X.a << endl;
		//-
		// target - current
		double diff_x = X.x - x;//
		double diff_y = X.y - y;
		double diff_a = X.a - a;

		dx = dx + diff_x;
		dy = dy + diff_y;
		da = da + diff_a;

		//new_prev_to_cur_transform.push_back(TransformParam(dx, dy, da));
		//
		out_new_transform << k << " " << dx << " " << dy << " " << da << endl;
		//
		T.at<double>(0, 0) = cos(da);
		T.at<double>(0, 1) = -sin(da);
		T.at<double>(1, 0) = sin(da);
		T.at<double>(1, 1) = cos(da);

		T.at<double>(0, 2) = dx;
		T.at<double>(1, 2) = dy;

		Mat cur2;

		warpAffine(prev, cur2, T, cur.size());

		cur2 = cur2(Range(vert_border, cur2.rows - vert_border), Range(HORIZONTAL_BORDER_CROP, cur2.cols - HORIZONTAL_BORDER_CROP));

		// Resize cur2 back to cur size, for better side by side comparison
		resize(cur2, cur2, cur.size());

		// Now draw the original and stabilised side by side for coolness
		Mat canvas = Mat::zeros(cur.rows, cur.cols * 2 + 10, cur.type());

		prev.copyTo(canvas(Range::all(), Range(0, cur2.cols)));
		cur2.copyTo(canvas(Range::all(), Range(cur2.cols + 10, cur2.cols * 2 + 10)));

		// If too big to fit on the screen, then scale it down by 2, hopefully it'll fit :)
		if (canvas.cols > 1920) {
			resize(canvas, canvas, Size(canvas.cols / 2, canvas.rows / 2));
		}
		//outputVideo<<canvas;
		imshow("before and after", canvas);

		waitKey(10);
		//
		prev = cur.clone();//cur.copyTo(prev);
		cur_grey.copyTo(prev_grey);

		cout << "Frame: " << k << "/" << max_frames << " - good optical flow: " << prev_corner2.size() << endl;
		k++;

	}
	return 0;
}
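The per-parameter Kalman smoothing above (K = P_/(P_+R), X = X_ + K*(z - X_), and so on) applies scalar equations componentwise to the (x, y, a) trajectory, which only compiles if Trajectory defines elementwise arithmetic. A minimal sketch of the operators those lines assume; the example's actual Trajectory struct is not shown and may define more:

// sketch of the elementwise (x, y, a) arithmetic the Kalman code above relies on
struct Trajectory {
    double x, y, a;   // translation and angle
    Trajectory() : x(0), y(0), a(0) {}
    Trajectory(double x_, double y_, double a_) : x(x_), y(y_), a(a_) {}
};
inline Trajectory operator+(const Trajectory& c, const Trajectory& d)
{ return Trajectory(c.x + d.x, c.y + d.y, c.a + d.a); }
inline Trajectory operator-(const Trajectory& c, const Trajectory& d)
{ return Trajectory(c.x - d.x, c.y - d.y, c.a - d.a); }
inline Trajectory operator*(const Trajectory& c, const Trajectory& d)
{ return Trajectory(c.x * d.x, c.y * d.y, c.a * d.a); }
inline Trajectory operator/(const Trajectory& c, const Trajectory& d)
{ return Trajectory(c.x / d.x, c.y / d.y, c.a / d.a); }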
int main(int, char**)
{
	string filename = "HorizontalView.3gp";
	VideoCapture cap(filename); // open the file.
	if(!cap.isOpened())  // check if we succeeded
		return -1;
	cap.set(CV_CAP_PROP_POS_FRAMES, 0);
	const string fileOutName = "HorizontalViewOut2.mp4";
	Size s = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH), (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT));
	VideoWriter outputVideo;
	outputVideo.open(fileOutName,CV_FOURCC('M', 'J', 'P', 'G'),30,s,true);
	if(!outputVideo.isOpened())
		return -1;
	//Initialize variables.
    Mat hsv,blur,thresholded, mser, rthresh;
	//Create a MSER feature detector
	Ptr<FeatureDetector> defaultDetector;
	Ptr<FeatureDetector> blobDetector;
	defaultDetector = FeatureDetector::create("MSER");
	blobDetector = FeatureDetector::create("SimpleBlob");
	int min_area = 700;
	// initialise every bound explicitly; the original one-line declarations
	// only initialised the last variable in each list, leaving the rest
	// undefined when the trackbars below first read them
	int rmin = 0, gmin = 0, bmin = 0, rmax = 0, gmax = 0, bmax = 0;
	int huemin = 0, satmin = 0, valmin = 230, huemin2 = 0, satmin2 = 0, valmin2 = 0;
	int huemax = 256, satmax = 256, valmax = 256, huemax2 = 256, satmax2 = 256, valmax2 = 256;
	int colr =  0;
    namedWindow("hsv",1);
    namedWindow("normal",2);
	namedWindow("blur",3);
	//namedWindow("thresh",4);
	createTrackbar("huemin", "blur", &huemin, 256);
	createTrackbar("satmin", "blur", &satmin, 256);
	createTrackbar("valmin", "blur", &valmin, 256);
	createTrackbar("huemax", "blur", &huemax, 256);
	createTrackbar("satmax", "blur", &satmax, 256);
	createTrackbar("valmax", "blur", &valmax, 256);
	/*createTrackbar("rmin", "thresh", &rmin, 256);
	createTrackbar("gmin", "thresh", &gmin, 256);
	createTrackbar("bmin", "thresh", &bmin, 256);
	createTrackbar("rmax", "thresh", &rmax, 256);
	createTrackbar("gmax", "thresh", &gmax, 256);
	createTrackbar("bmax", "thresh", &bmax, 256);*/
	huemin = 123;
	satmin = 32;
	valmin = 218;
	huemax = 188;
	satmax = 256;
	valmax = 256;
	huemin2 = 60;
	satmin2 = 30;
	valmin2 = 134;
	huemax2 = 89;
	satmax2 = 150;
	valmax2 = 256;
	int frame_count = 0;
	for(;;) {
		defaultDetector->set("minArea",min_area);
		//Read in a frame from video data.		
		Mat frame,frameBot,frameTop;
		cap >> frame;
		
		//frameTop = frame.rowRange(Range(0,(int)(frame.rows/2));
		frameBot = frame.rowRange(Range((int)(frame.rows/2),frame.rows));
		//Convert to HSV color space.
        cvtColor(frameBot, hsv, CV_BGR2HSV);
		//Threshold the image to only look for bright red + green spots.
		inRange(hsv, Scalar(huemin,satmin,valmin), Scalar(huemax,satmax,valmax), thresholded);
		inRange(hsv, Scalar(huemin2,satmin2,valmin2), Scalar(huemax2,satmax2,valmax2), rthresh);
		bitwise_or(thresholded,rthresh,thresholded);
		//Blur the image to make detections more robust.
		GaussianBlur(thresholded, blur, Size(9,9), 3, 3);
		//Apply the MSER detector to the blurred image.
		vector<KeyPoint> keypoints;
		defaultDetector->detect(blur,keypoints);
		//Display the maximal keypoints over the original image.
		for(size_t i = 0; i < keypoints.size(); i++){
			if(!supress(keypoints,keypoints[i])){		
				Point center = keypoints[i].pt;
				int radius = cvRound(keypoints[i].size/2);
				Point topLeft = center;
				topLeft.x -= radius;
				if(topLeft.x < 0)
					topLeft.x = 0;
				topLeft.y -= radius;
				if(topLeft.y < 0)
					topLeft.y = 0;
				Point botRight = center;
				botRight.y += radius;
				if(botRight.y >= frameBot.rows)
					botRight.y = frameBot.rows-1;
				botRight.x += radius;
				if(botRight.x >= frameBot.cols)
					botRight.x = frameBot.cols-1;
				int width = botRight.x - topLeft.x;
				int height = botRight.y - topLeft.y;
				//Bounding box of a maximal detection.
				Rect r(topLeft.x,topLeft.y,width,height);
				//Draw the rectangle to the output 
				rectangle(frameBot,r,Scalar(256,0,0),1,CV_AA);
				char c[50];
				int n = sprintf(c,"Response:%f",keypoints[i].response);
				string s(c,n);
				putText(frameBot,s,topLeft,FONT_HERSHEY_COMPLEX,1.0,Scalar(0,255,0));
				//circle( frameBot, center, 3, Scalar(0,255,0), -1, 8, 0 );
				//circle( frameBot, center, radius, Scalar(255,0,0), 3, 8, 0 );
			}
		}
	//	drawKeypoints( blur, keypoints, blur, Scalar::all(-1));
		//Show all of the frames.
		//frame.rowRange(Range((int)(frame.rows/2),frame.rows)) = frameBot;
		imshow("hsv",hsv);
        imshow("normal", frameBot);
		imshow("blur", blur);
	//	std::cout<<frame_count<<"\n";
	//	frame_count++;
		//outputVideo << frame;
		//Wait a pre-selected amount of time to keep framerate consistent if possible.
		//if(waitKey(0) >= 0) break;
		waitKey(0);
	}
    return 0;
}
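The display loop above keeps only keypoints for which supress() returns false, but the predicate itself is not shown. A hypothetical sketch of what it might look like, assuming a simple non-maximum suppression over the MSER detections (suppress a keypoint when an overlapping keypoint has a stronger response):

#include <cmath>
#include <vector>
#include <opencv2/features2d/features2d.hpp>

// true if some other overlapping keypoint responds more strongly than kp
static bool supress(const std::vector<cv::KeyPoint>& kps, const cv::KeyPoint& kp)
{
	for (size_t i = 0; i < kps.size(); ++i) {
		double dx = kps[i].pt.x - kp.pt.x;
		double dy = kps[i].pt.y - kp.pt.y;
		double dist = std::sqrt(dx * dx + dy * dy);
		// overlapping (centers closer than the mean size) and strictly stronger
		if (dist < (kps[i].size + kp.size) / 2 && kps[i].response > kp.response)
			return true;
	}
	return false;
}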
Exemple #16
0
int main(int argc, char* argv[]) {

	Mat frame;

	printHelp();
	if (argc != 16) {
		printf("\nInvalid parameters\n.");
		exit(-1);
	}

	char fileName[80];
	char baseName[80]; sprintf(baseName, "%s", argv[1] );

	int firstBGF = atoi(argv[2]);
	int lastBGF = atoi(argv[3]);
	int stepBGF = atoi(argv[4]);

	int firstF = atoi(argv[5]);
	int lastF = atoi(argv[6]);
	int stepF = atoi(argv[7]);

	int iLevels = atoi(argv[8]);
	float loThL = atof(argv[9]);
	float hiThL = atof(argv[10]);
	float loThC = atof(argv[11]);
	float hiThC = atof(argv[12]);
	int minGap = atoi(argv[13]);
	int maxGap = atoi(argv[14]);
	int hGap   = atoi(argv[15]);

	sprintf(fileName, "%s%d.jpg", baseName, firstF);
	frame = imread(fileName, 1);
	if (!frame.data) {
		printf("Could not open file %s.\n", fileName);
		return (-1);
	}

	printf("Input frame resolution: %d x %d\n", frame.cols, frame.rows);

	// pyramid
	vector<Mat> pyr;
	buildPyramid(frame, pyr, iLevels);
	printf("Image resolution %d x %d \n", pyr[iLevels].cols, pyr[iLevels].rows);

	// BackGround subtraction
	BGS bgs;
	bgs.Init(pyr[iLevels]);
	//  BGS parameters
	bgs.SetThresholdL(loThL, hiThL);
	bgs.SetThresholdC(loThC, hiThC);
	bgs.SetMinMaxGap(minGap, maxGap);
	bgs.SetHGap(hGap);

	// create windows to display images
	namedWindow(WIN, CV_WINDOW_AUTOSIZE );
#if DEBUG
	namedWindow(WAN, CV_WINDOW_AUTOSIZE );
	namedWindow(WBN, CV_WINDOW_AUTOSIZE );
	namedWindow(WLN, CV_WINDOW_AUTOSIZE );
#endif

	// Build background model
	bool continua = true;
	int cont = firstBGF;
	int step = stepBGF;
	while ( continua && cont < lastBGF )
	{
#if MEDIAN_BGS
		bgs.AddMedianSample(pyr[iLevels]);
#else
		bgs.AddMeanSample(pyr[iLevels]);
#endif
		bgs.Show(bgs.outP, WAN, WBN, WLN );

		// load frame
		cont += step;
		if (cont >= lastBGF) break;

		sprintf(fileName, "%s%d.jpg", baseName, cont);
		frame = imread(fileName, 1);
		if (!frame.data) {
			printf("Could not open file %s.\n", fileName);
			return (-1);
		}

		buildPyramid(frame, pyr, iLevels);
		imshow(WIN, pyr[iLevels]);

		if (waitKey(30) >= 0)
			continua = false;
	}
#if MEDIAN_BGS
	bgs.ComputeMedianBG();
#else
	bgs.ComputeMeanBG();
#endif
	bgs.Show(bgs.luvP, WAN, WBN, WLN );
	imshow(WIN, bgs.bgBGRImg);

	printf("Press q to quit or c to continue.\n");
	char k = waitKey();
	if (k == 'q' || k == 'Q')
		return 0;

	///////////////////////////////////////////////////////////////////////
	////
	//// Process the video
	////
    ///////////////////////////////////////////////////////////////////////


	// loop parameters
	Mat binImg;
	binImg = Mat (pyr[iLevels].size(), CV_8UC1);
	namedWindow(WBG, CV_WINDOW_AUTOSIZE );

	char keyPressed;


	/// Video
	VideoWriter vw;
	vw.open("movie.mpeg", CV_FOURCC('P','I','M','1'), 25, pyr[iLevels].size());

	continua = true;
	for (cont = firstF; continua && cont<lastF; cont+= stepF) {

		// load image
		sprintf(fileName, "%s%d.jpg", baseName, cont);
		frame = imread(fileName, 1);
		if (!frame.data) {
			printf("Could not open file %s.\n", fileName);
			return (-1);
		}

		buildPyramid(frame, pyr, iLevels);
		imshow(WIN, pyr[iLevels]);
#if MEDIAN_BGS
		bgs.BGSeg(pyr[iLevels], binImg);
		imshow(WBG, binImg);
		imshow(WAN, bgs.hiImg);
		imshow(WBN, bgs.loImg);
#else
		bgs.Subtract(pyr[iLevels], binImg);
		imshow(WBG, binImg);
#endif
		Mat vid;
		cvtColor(binImg, vid, CV_GRAY2BGR);
		sprintf(fileName, "out/%d.jpg", cont);
       
        imwrite(fileName, vid);
		vw << vid;

		keyPressed = waitKey(10);

		switch (keyPressed) {
		case 'q':
		case 'Q':
			continua = false;	// actually stop the processing loop on 'q'
			break;
		default:
			break;
		}
	}

	return 0;
}
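The MEDIAN_BGS path above accumulates samples with AddMedianSample() and then calls ComputeMedianBG(); the BGS class itself is not shown. A minimal sketch of a per-pixel median background model on grayscale frames, which is one plausible reading of those calls; the real class also thresholds in Luv and handles min/max gaps, which this sketch omits:

#include <algorithm>
#include <vector>
#include <opencv2/core/core.hpp>

// per-pixel median over a stack of accumulated 8-bit frames
static cv::Mat medianBackground(const std::vector<cv::Mat>& samples)
{
    CV_Assert(!samples.empty() && samples[0].type() == CV_8UC1);
    cv::Mat bg(samples[0].size(), CV_8UC1);
    std::vector<uchar> vals(samples.size());
    for (int r = 0; r < bg.rows; ++r)
        for (int c = 0; c < bg.cols; ++c) {
            for (size_t s = 0; s < samples.size(); ++s)
                vals[s] = samples[s].at<uchar>(r, c);
            std::nth_element(vals.begin(), vals.begin() + vals.size() / 2, vals.end());
            bg.at<uchar>(r, c) = vals[vals.size() / 2];  // the median sample
        }
    return bg;
}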
Exemple #17
0
int main(int argc, char *argv[])
{
	Mat emptyFrame = Mat::zeros(Camera::reso_height, Camera::reso_width, CV_8UC3);
	Thesis::FastTracking fastTrack(20); // motion threshold (previously 50)
	Thesis::KalmanFilter kalman;
	kalman.initialise(CoordinateReal(0, 0, 0));
	kalman.openFile();
	// the two stereoscope images
	Camera one(0,-125,0,0,0,90);
	Camera two(2, 125,0,0,0,90);
	Camera three;
	// list of cameras and cameraLocs
	std::vector<Camera> cameraList;
	std::vector<CoordinateReal> locList;
	VideoWriter writeOne ;
	VideoWriter writeTwo;
	VideoWriter writeThree;
	VideoCapture capOne;
	VideoCapture capTwo;
	VideoCapture capThree;
	Thesis::Stats stat;
	cv::Point2d horizontalOne(0,Camera::reso_height/2);
	cv::Point2d horizontalTwo(Camera::reso_width, Camera::reso_height/2);
	cv::Point2d verticalOne(Camera::reso_width / 2, 0);
	cv::Point2d verticalTwo(Camera::reso_width / 2, Camera::reso_height);
	ofstream framesFile_;
	framesFile_.open("../../../../ThesisImages/fps_ABSDIFF.txt");
	double framesPerSecond = 1 / 10.0;	// frame period in seconds (10 fps target)
	//open the recorders
	FeatureExtraction surf(5000);
	Stereoscope stereo;
	Util util;
	bool once = false;
	bool foundInBoth = false;
	bool foundInMono = false;
	std::vector<cv::Point2f> leftRect(4);
	cv::Rect leftRealRect;
	cv::Rect rightRealRect;
	std::vector<cv::Point2f> rightRect(4);
	cv::Mat frameLeft;
	cv::Mat frameRight;
	cv::Mat frameThree;
	cv::Mat prevFrameLeft;
	cv::Mat prevFrameRight;
	cv::Mat prevFrameThree;

	// check if you going to run simulation or not or record
	cout << " run simulation: 's' or normal: 'n' or record 'o' or threeCameras 'c' " << endl;
	imshow("main", emptyFrame);
	char command = waitKey(0);

	string left = "../../../../ThesisImages/leftTen.avi";
	string right = "../../../../ThesisImages/rightTen.avi";
	string mid = "../../../../ThesisImages/midTen.avi";
	commands(command);
	emptyFrame = Mat::ones(10, 10, CV_64F);
	imshow("main", emptyFrame);
	command = waitKey(0);
	camCount(command);
	// check the camera count
	if (multiCams){
		//load in all the cameras
		three = Camera(3, 175, -50, 585, 7.1, 97);//Camera(3, 200, -60, 480, 7,111);
	}
	//==========hsv values=======================
	cv::Mat hsvFrame;
	cv::Mat threshold;
	int iLowH = 155;
	int iHighH = 179;

	int iLowS = 75;
	int iHighS = 255;

	int iLowV = 0;
	int iHighV = 255;
	
	//=================================
	double elapsedTime = 0;
	double waitDelta = 0;	
	if (record){
		writeOne.open("../../../../ThesisImages/leftTen.avi", 0, 10, cv::Size(864, 480), true);
		writeTwo.open("../../../../ThesisImages/rightTen.avi", 0, 10, cv::Size(864, 480), true);
		writeThree.open("../../../../ThesisImages/midTen.avi", 0, 10, cv::Size(864, 480), true);
	}else if (simulation){
		capOne.open(left);
		capTwo.open(right);
		capThree.open(mid);
		assert(capOne.isOpened() && capTwo.isOpened());
	}
	 if (hsv){
		//Create trackbars in "Control" window
		cvCreateTrackbar("LowH", "main", &iLowH, 179); //Hue (0 - 179)
		cvCreateTrackbar("HighH", "main", &iHighH, 179);

		cvCreateTrackbar("LowS", "main", &iLowS, 255); //Saturation (0 - 255)
		cvCreateTrackbar("HighS", "main", &iHighS, 255);

		cvCreateTrackbar("LowV", "main", &iLowV, 255); //Value (0 - 255)
		cvCreateTrackbar("HighV", "main", &iHighV, 255);
	}
	if(!simulation){
		cout << " adding" << endl;
		surf.addImageToLib("backToTheFutureCover.jpg");
	}
	CoordinateReal leftLoc;
	CoordinateReal rightLoc;
	CoordinateReal threeLoc;
	while (running){
		clock_t beginTime = clock();
		commands(command);
		if (found){
			kalman.predictState();
			kalman.printCurrentState();
		}
		int thickness = -1;
		int lineType = 8;
		//normal running
		if (!simulation){
			frameLeft = one.grabFrame();
			frameRight = two.grabFrame();
			if (multiCams){
				frameThree = three.grabFrame();
			}
		}
		else{
			 //if last frame, release then reopen
			if (capOne.get(CV_CAP_PROP_POS_FRAMES) == (capOne.get(CV_CAP_PROP_FRAME_COUNT) - 1)){
				capOne.release();
				capTwo.release();
				capOne.open(left);
				capTwo.open(right);
				if (multiCams){
					capThree.release();
					capThree.open(mid);
				}
			}
			// means it is simulation: i.e frames come from a video
			capOne >> frameLeft;
			capTwo >> frameRight;
			if (multiCams){
				capThree >> frameThree;
			}
		}
		if (hsv){
			//convert the frame into hsv
			cvtColor(frameLeft, hsvFrame, COLOR_BGR2HSV);
			inRange(hsvFrame, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), threshold);
			blur(threshold, threshold, cv::Size(20, 20));
			cv::threshold(threshold, threshold, 50, 255, THRESH_BINARY);
			//imshow("imageTwo", hsvFrame);
			imshow("hsv", threshold);
		}
	
		if (record){
			writeOne.write(frameLeft);
			writeTwo.write(frameRight);
			if (multiCams){
				writeThree.write(frameThree);
			}
		}
		if (command == ' '){
			//left frame =============================
			cout << "pressedSpace " << endl;
			std::vector<CoordinateReal> coordLeft = surf.detect(frameLeft, true, found, leftRealRect);
			if (!coordLeft.empty()){
				int thickness = -1;
				int lineType = 8;
				cv::circle(frameLeft, cv::Point2f(coordLeft[0].x(), coordLeft[0].y()), 5,
					cv::Scalar(0, 0, 255),
					thickness,
					lineType);
				leftRect = surf.getSceneCorners();
				line(frameLeft, leftRect[0], leftRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
				line(frameLeft, leftRect[1], leftRect[2], cv::Scalar(0, 0, 255), 2);
				line(frameLeft, leftRect[2], leftRect[3], cv::Scalar(0, 255, 0), 2);
				line(frameLeft, leftRect[3], leftRect[0], cv::Scalar(0, 255, 0), 2);
				leftRealRect = util.getSizedRect(leftRect, one.reso_height, one.reso_width, 0.1);
				leftLoc = coordLeft[0];
			}
			//right frame ==================================
			std::vector<CoordinateReal> coordRight = surf.detect(frameRight, true, found, rightRealRect);
			if (!coordRight.empty()){
				int thickness = -1;
				int lineType = 8;
				cv::circle(frameRight, cv::Point2f(coordRight[0].x(), coordRight[0].y()), 5,
					cv::Scalar(0, 0, 255),
					thickness,
					lineType);
				rightRect = surf.getSceneCorners();
				line(frameRight, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
				line(frameRight, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2);
				line(frameRight, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2);
				line(frameRight, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2);
				rightRealRect = util.getSizedRect(rightRect, one.reso_height, one.reso_width, 0.1);
				rightLoc = coordRight[0];
			}
			if (multiCams){
				std::vector<CoordinateReal> coordThrees = surf.detect(frameThree, true, false, leftRealRect);
				if (!coordThrees.empty()){	// guard against an empty detection, as done for the other cameras
					CoordinateReal coordThree = coordThrees[0];
					rightRect = surf.getSceneCorners();
					line(frameThree, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
					line(frameThree, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2);
					line(frameThree, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2);
					line(frameThree, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2);
					cout << " found in x: " << coordThree.x() << " found in y: " << coordThree.y() << endl;
					foundInMono = true;
					threeLoc = coordThree;
				}
			}
			found = true;
			
		}
		else if(!record){
			cout << " fastTracking " << endl;
			if (once){
				CoordinateReal leftCameraLoc(0, 0, 0);
				CoordinateReal rightCameraLoc(0,0,0);
				if (found) {
					leftCameraLoc = kalman.expectedLocObs(one);
					rightCameraLoc = kalman.expectedLocObs(two);
				}
				leftLoc = fastTrack.findObject(frameLeft, prevFrameLeft, leftCameraLoc,leftDebug);
				rightLoc = fastTrack.findObject(frameRight, prevFrameRight, rightCameraLoc ,rightDebug);
				// go through the list of locations 
				if (multiCams){
					CoordinateReal miscCameraLoc(0, 0, 0);
					if (found){
						miscCameraLoc = kalman.expectedLocObs(three);
					}
					threeLoc = fastTrack.findObject(frameThree, prevFrameThree, miscCameraLoc, threeDebug);
				}
			}
			frameLeft.copyTo(prevFrameLeft);
			frameRight.copyTo(prevFrameRight);
			if (multiCams){
				frameThree.copyTo(prevFrameThree);
			}
			once = true;
			cv::circle(frameLeft, cv::Point2f(leftLoc.x(), leftLoc.y()), 5,
				cv::Scalar(0, 0, 255),
				thickness,
				lineType);
			cv::circle(frameRight, cv::Point2f(rightLoc.x(), rightLoc.y()), 5,
				cv::Scalar(0, 0, 255),
				thickness,
				lineType);
			if (multiCams){	// frameThree is only valid when the third camera is in use
				cv::circle(frameThree, cv::Point2f(threeLoc.x(), threeLoc.y()), 5,
					cv::Scalar(0, 0, 255),
					thickness,
					lineType);
			}
		}
		if (multiCams){
			foundInMono = Util::isInFrame(threeLoc);
		}
		foundInBoth = Util::isInBothFrames(leftLoc, rightLoc);
	    
		if (foundInBoth){
			CoordinateReal real = stereo.getLocation(leftLoc, rightLoc);
			//print the current location
			cout << "x: " << real.x() << "y: " << real.y() << "z: " << real.z() << endl;
			//cout << "time in seconds" << float(clock() - beginTime) / CLOCKS_PER_SEC << endl;
			if (!found){
				cout << "initialising kalman filter" << endl;
				kalman.initialise(real);
			}
			else {
				kalman.stereoObservation(real);
			}
			 
			double curTime = double(clock())/CLOCKS_PER_SEC;
			cout << "curTime" << curTime << endl;
			stat.getVel(real, curTime);
			foundInBoth = false;
			found = true;
		}
		if (foundInMono){
			// pass the observation 
			cout << "found in mono" << endl;
			kalman.observation(threeLoc, three);
			foundInMono = false;
		}
		if (cross){
			// add cross to all the frames
			line(frameRight, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2); 
			line(frameRight, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			line(frameLeft, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2);
			line(frameLeft, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			//multi cam
			if (multiCams){
				line(frameThree, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2);
				line(frameThree, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			}
		}
		cv::imshow("left", frameLeft);
		cv::imshow("right", frameRight);
		if (multiCams){
			cv::imshow("mid", frameThree);
		}
		command = waitKey(1);
		if (surfing){
			cout << "wait" << endl;
			waitKey(0);
			surfing = false;
		}
		clock_t end = clock();
		elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC;
		waitDelta = framesPerSecond - elapsedTime;
		if (waitDelta > 0){
			command = waitKey(waitDelta* 1000);
		}
		 end = clock();
		elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC;
		cout << "fps"  << 1 / elapsedTime << endl;
		//convert fps to string
		string fps = std::to_string(1 / elapsedTime);
		fps += "\n";
		framesFile_ << fps;

	}
	framesFile_.close();
	kalman.closeFile();
	return 0;
}
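stereo.getLocation(leftLoc, rightLoc) above converts a matched pair of pixel locations into a 3-D position, but the Stereoscope class is not shown. A hypothetical sketch of the parallel-rig triangulation it could be performing: with focal length f (pixels), baseline B, and horizontal disparity d = xL - xR, depth is Z = f*B/d. The f, B, cx, cy values and the CoordinateReal(X, Y, Z) argument order are assumptions, not the thesis rig's calibrated values:

// assumed parallel-camera triangulation; cx, cy is the principal point
CoordinateReal triangulateSketch(CoordinateReal left, CoordinateReal right,
                                 double f, double B, double cx, double cy)
{
	double d = left.x() - right.x();             // horizontal disparity in pixels
	if (d == 0) return CoordinateReal(0, 0, 0);  // no depth information
	double Z = f * B / d;                        // depth
	double X = (left.x() - cx) * Z / f;          // lateral offset from the optical axis
	double Y = (left.y() - cy) * Z / f;          // vertical offset
	return CoordinateReal(X, Y, Z);
}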
Exemple #18
0
int main(int argc, char *argv[])
{

    Temporal_Filter filter;
    Mat dst;
    if (argc < 3)   // need the input video and the output file name
    {
        cout << "Not enough parameters" << endl;
        return -1;
    }

    const string source      = argv[1];           // the source file name
    const bool askOutputType = false;  // If false it will use the inputs codec type

    VideoCapture inputVideo(source);              // Open input
    if (!inputVideo.isOpened())
    {
        cout  << "Could not open the input video: " << source << endl;
        return -1;
    }

    string::size_type pAt = source.find_last_of('.');                  // Find extension point
    const string NAME = argv[2];   // Form the new name with container
    int ex = static_cast<int>(inputVideo.get(CV_CAP_PROP_FOURCC));     // Get Codec Type- Int form

    // Transform from int to char via Bitwise operators
    char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};

    Size S = Size(320,240);   // fixed processing size; each input frame is resized to 320x240 below

    VideoWriter outputVideo;                                        // Open the output
    if (askOutputType)
        outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true);
    else
        outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true);

    if (!outputVideo.isOpened())
    {
        cout  << "Could not open the output video for write: " << source << endl;
        return -1;
    }

    cout << "Input frame resolution: Width=" << S.width << "  Height=" << S.height
         << " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl;
    cout << "Input codec type: " << EXT << endl;


    Mat src, res;
    vector<Mat> spl;
    Mat a;
    a.create(240,320,CV_8UC(3));


    for(;;) //Show the image captured in the window and repeat
    {
        inputVideo >> src;              // read

        if (src.empty()) break;         // check if at end

        cvtColor(src,src,CV_BGR2GRAY);
        resize(src,a, a.size(), 0, 0, INTER_NEAREST);
        imshow("input",a);
        dst=filter.temporal_filter(a);

        cvtColor(dst,res,CV_GRAY2BGR);
       //outputVideo.write(res); //save or
       outputVideo << res;
       imshow("filtered",res);
       cv::waitKey(1);
    }

    cout << "Finished writing" << endl;
    return 0;
}
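The Temporal_Filter class used above is not shown. One plausible implementation of temporal_filter() is an exponential moving average over frames, which smooths pixel noise at the cost of some motion blur; a minimal sketch under that assumption, with a hypothetical class name:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

class EmaFilter {
    cv::Mat acc_;        // running average, kept in CV_32F
    double alpha_;       // weight of the newest frame
public:
    explicit EmaFilter(double alpha = 0.2) : alpha_(alpha) {}
    cv::Mat apply(const cv::Mat& gray8u) {
        if (acc_.empty())
            gray8u.convertTo(acc_, CV_32F);          // seed with the first frame
        else
            cv::accumulateWeighted(gray8u, acc_, alpha_);  // acc = (1-a)*acc + a*frame
        cv::Mat out;
        acc_.convertTo(out, CV_8U);
        return out;
    }
};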
void writeForegroundVideoWithoutNoise()
{
    Mat matFrame(frameWithoutNoise, false);
    foregroundVideoWithoutNoise.write(matFrame);
}
void split_img( int split_num, int x, int y, int num_x, int num_y, int time_split, string fn, string out_dir, int overlap) {
  mtx.lock();
  cout << "worker " << x << "-" << y << "-" << split_num << " running." << endl;
  mtx.unlock();
  VideoCapture capture(fn);
  int max_frames = capture.get(CV_CAP_PROP_FRAME_COUNT);

  int rect_width = (int) capture.get(CV_CAP_PROP_FRAME_WIDTH)/num_x;
  int rect_height = (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT)/num_y;

  int start_frame = split_num * (max_frames / time_split);

  capture.set(CV_CAP_PROP_POS_FRAMES, start_frame);

  int this_x = x*rect_width;
  int this_y = y*rect_height;
  int this_width = rect_width;
  int this_height = rect_height;

  //Determine whether to add overlap to the quadrant.
  if (x > 0){
    this_x -= overlap;
    this_width += overlap;
  }
  if (y > 0) {
    this_y -= overlap;
    this_height += overlap;
  }
  if (x != num_x-1) {
    this_width += overlap;
  }
  if (y != num_y-1) {
    this_height += overlap;
  }

  Size sz = Size(this_width, this_height);

  stringstream out_key;
  out_key << y << "-" << x << "-" << split_num;
  VideoWriter writer;
  string out_fn = out_dir + "/" + out_key.str() + ".avi"; 
  writer.open(out_fn, CV_FOURCC('m','p','4','v'), capture.get(CV_CAP_PROP_FPS), sz, true);
  //writer.open(out_fn, CV_FOURCC('m','p','4','v'), 29.97, sz, true);

  Mat src;
  Rect rect = Rect( this_x, this_y, this_width, this_height );

  int k = 0;
  while ( k < (max_frames / time_split)-1 )
  {
    capture >> src;
    if ( src.data == NULL )
      break;

    writer << Mat(src, rect);

    int percent = int(100 * ((float)k/(max_frames/time_split)));
    if ( percent % 5 == 0 )
    {
      mtx.lock();
      cout << "worker " << x << "-" << y << "-" << split_num << ": " << percent << "%" << endl;
      mtx.unlock();
    }
    k++;
  }
}
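split_img() is written to run as one worker per grid cell and time slice (hence the mutex around the progress prints). A usage sketch of a driver, which is assumed rather than shown in the original: a 2x2 grid cut into 4 time chunks with a 16 px overlap so neighbouring tiles share a seam, using the C++11 threads the mtx above implies:

#include <string>
#include <thread>
#include <vector>

void run_split_workers(const std::string& fn, const std::string& out_dir) {
  const int num_x = 2, num_y = 2, time_split = 4, overlap = 16;
  std::vector<std::thread> workers;
  for (int t = 0; t < time_split; ++t)
    for (int y = 0; y < num_y; ++y)
      for (int x = 0; x < num_x; ++x)
        workers.push_back(std::thread(split_img, t, x, y, num_x, num_y,
                                      time_split, fn, out_dir, overlap));
  for (size_t i = 0; i < workers.size(); ++i)
    workers[i].join();   // wait for every tile writer to finish
}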
Exemple #21
0
int main(int argc, char* argv[])
{

	VideoCapture capture;

	// Objects
	Mat frame;

	// keyboard pressed
	char keypressed = 0;
	bool success;

	// Load image from disk
	capture.open(0);
	// if not success, exit program
	if (!capture.isOpened()){
		cout << "error in VideoCapture: check path file" << endl;
		getchar();
		return 1;
	}

	/// Parameters for Shi-Tomasi algorithm
	vector<Point2f> cornersA, cornersB;
	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false; 
	double k = 0.04;
	int maxCorners = MAX_CORNERS;

	// winsize has to be 11 or 13, otherwise nothing is found
	vector<uchar> status;
	vector<float> error;
	int winsize = 11;
	int maxlvl = 5;

	// Objects
	Mat img_prev, img_next, grayA, grayB;

	success = capture.read(frame);
	// if no success exit program
	if (success == false){
		cout << "Cannot read the frame from file" << endl;
		getchar();
		return 1;
	}

	img_prev = frame.clone();

	// Windows for all the images
	namedWindow("Corners A", CV_WINDOW_AUTOSIZE);
	namedWindow("Corners B", CV_WINDOW_AUTOSIZE);
    
    VideoWriter outputVideo;
    Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    int ex = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));  
    outputVideo.open("video.avi", CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), S, true);
    
    if (!outputVideo.isOpened())
    {
        cout  << "Could not open the output video for write: "  << endl;
        return -1;
    }
    
	while (keypressed != ESCAPE)
	{
		// read frame by frame in a loop
		success = capture.read(frame);
		// if no success exit program
		if (success == false){
			cout << "Cannot read the frame from file" << endl;
			return 1;
		}

		img_next = frame.clone();

		// convert to grayScale
		cvtColor(img_prev, grayA, CV_RGB2GRAY);
		cvtColor(img_next, grayB, CV_RGB2GRAY);

		/// Apply corner detection
		goodFeaturesToTrack(grayA,
			cornersA,
			maxCorners,
			qualityLevel,
			minDistance,
			Mat(),
			blockSize,
			useHarrisDetector,
			k);

		calcOpticalFlowPyrLK(grayA, grayB, cornersA, cornersB, status, error,
			Size(winsize, winsize), maxlvl);

		/// Draw corners detected
		//cout << "Number of cornersA detected: " << cornersA.size() << endl;
		//cout << "Optical Flow corners detected: " << cornersB.size() << endl;
		for (size_t i = 0; i < cornersA.size(); i++)
		{
			if (status[i])	// draw only the corners that were tracked successfully
				line(img_prev, cornersA[i], cornersB[i], Scalar(0, 255, 0), 2);
		}

		// Show image in the name of the window
		imshow("Corners A", img_prev);
		imshow("Corners B", img_next);
        outputVideo << img_prev;
        
		// Function for show the image in ms.
		keypressed = waitKey(1);
		img_prev = img_next;
		

	}
	// Free memory
	img_prev.release();
	img_next.release();
	grayA.release();
	grayB.release();
	destroyAllWindows();
	// End of the program
	return 0;
}
void processVideo(CMFC_CartoonApp *theApp)
{
	processedFrame = false;
	vector<Rect> vec_rect_facesCascade;
	Mat myFrame = cvQueryFrame(myCap);	// myFrame points at the current frame read from the AVI file

	if (myFrame.empty())	// check whether the frame is empty
	{
		cout << "video playback finished" << endl;
		cout << "nnn " << nnn << endl; 
		theApp->dlg.KillTimer(1);	// stop the playback timer
		return;
	}
	
	Mat targetImg, originalMat;
	targetImg = myFrame.clone();
	originalMat = myFrame.clone();
	// advance the progress slider
	theApp->dlg.m_slider.SetPos(++pos);


	// draw face-detection rectangles
	if(detectFace && grabFlag == false)
	{
		//TFacePosition *FaceArray;
		//HImage hTargetImg;
		//int detectedCount = -1;	//number of faces in the current frame
		//hTargetImg = loadImageFromMat(targetImg);
		//FSDK_FaceTemplate *detectedFaceTemplates = new FSDK_FaceTemplate[20];
		//FaceArray = detectUsingLuxand(hTargetImg, detectedCount, detectedFaceTemplates, NULL);	//detect faces in the current frame
		//for (int i = 0; i < detectedCount; i++)
		//{
		//	TFacePosition facePosition = FaceArray[i];
		//	rectangle(targetImg, Rect(facePosition.xc - facePosition.w/2, facePosition.yc - facePosition.w/2, facePosition.w, facePosition.w), cvScalar(255, 0, 255), 2);		
		//}
		
		detectCascade(targetImg, cascade1, 1, vec_rect_facesCascade);
		for(int j = 0; j < vec_rect_facesCascade.size(); j++)
		{
			rectangle(targetImg, vec_rect_facesCascade.at(j), cvScalar(255, 0, 255), 2);		
		}
	}

	if (grabFlag == true && vec_FaceCartoons.size() > 0)
	{
		base_test1_str += "pos: " + toString(pos) + "\n";

		// detect whether the scene has changed
		if(!preFrame.empty() && !myFrame.empty())
		{
			base_test1 = compareHist1(preFrame, myFrame);	// compute the similarity between the two frames
			base_test1_str += toString(base_test1) +  "\n";
			cout<<"scene similarity  "<<base_test1<<endl;
			if(base_test1 > 6)	// above the threshold: treat it as a scene change
			{
				nnn++;
				situationCount = 0;	// scene changed: reset the per-scene frame counter
				selectFace = false;
				vec_formerTemplates.clear();
				//waitKey(0);
				for (int i = 0; i < 20; i++)	// clear the per-person match counts and start counting again for the new scene
				{
					changeBackArray[i] = 0;
				}
			}else{
				situationCount++; // same scene: one more frame
			}
		}
		preFrame = myFrame.clone();	// keep the current frame for the next comparison

		double t,tt,ttt;
		t = (double)cvGetTickCount();
		int detectedCount = -1;	// number of faces in the current frame
		HImage hTargetImg;
		hTargetImg = loadImageFromMat(targetImg);
		/*imwrite("targetImg.jpg", targetImg);
		FSDK_LoadImageFromFile(&hTargetImg, "targetImg.jpg");*/
		FSDK_FaceTemplate *detectedFaceTemplates = new FSDK_FaceTemplate[20];
		TFacePosition *FaceArray;
			
		FaceArray = detectUsingLuxand(hTargetImg, detectedCount, detectedFaceTemplates, NULL);	// detect faces in the current frame
			
		tt = (double)cvGetTickCount() - t;
		printf("detect time = %g ms\n", tt / ((double)cvGetTickFrequency()*1000.));

		if (detectedCount > 0)	// match and replace only when the frame contains faces
		{
			// draw face-detection rectangles
			if(detectFace)
			{
				for (int i = 0; i < detectedCount; i++)
				{
					TFacePosition facePosition = FaceArray[i];
					rectangle(targetImg, Rect(facePosition.xc - facePosition.w/2, facePosition.yc - facePosition.w/2, facePosition.w, facePosition.w), cvScalar(255, 0, 255), 2);		
				}
			}

			ttt = (double)cvGetTickCount();
			float temp_same_num_all = 0;
			float max_same_num_all = 0;
			int similiestFaceNum_all_cartoon = -1;
			int similiestFaceNum_all_man = -1;
			// among all faces in the frame, find the one most similar to the selected face
			for(int i = 0; i < vec_FaceCartoons.size(); i++)
			{
				FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(i);
				float temp_same_num = 0;
				float max_same_num = 0;
				int similiestFaceNum = -1;
				for(int j = 0; j < detectedCount; j++)
				{
					for(int k = 0; k < faceCartoonObj->faceTemplates.size(); k++)
					{
						temp_same_num = getSimilarityBetweenTemplate(faceCartoonObj->faceTemplates.at(k), detectedFaceTemplates[j]);
						if(temp_same_num > max_same_num){
							similiestFaceNum = j;
							max_same_num = temp_same_num;
						}
					}

				}
				if(max_same_num > max_same_num_all)
				{
					max_same_num_all = max_same_num;
					similiestFaceNum_all_cartoon = similiestFaceNum;
					similiestFaceNum_all_man = i;
				}
				cout<<"相似度  "<<max_same_num<<endl;
				base_test1_str += "similirity: " + toString(max_same_num) +  "\n";
			}

			base_test1_str += "sitCount: " + toString(situationCount) +  "\n";
			base_test1_str += "sitCartNum: " + toString(situationCartoonNum) +  "\n";

			// same scene and at least 5 frames already pasted: paste this one too
			// (also paste directly in the first scene after a face was selected)
			if ((base_test1 <= 6 && situationCartoonNum >= 5) || selectFace)
			{
				// after a scene change, the first ten frames decide which face to show;
				// past 10 frames, always cartoonize the FcObj that matched most often
				if(situationCount > 10)	
				{
					base_test1_str += ">10";
					base_test1_str += "\n";
					int mostMatchManNum = 0, maxMatchTime = 0;
					for(int k = 0; k < 20; k++)	// pick the person matched most often during the first ten frames
					{
						if(changeBackArray[k] > maxMatchTime)
						{
							mostMatchManNum = k;
							maxMatchTime = changeBackArray[k];
						}
					}
					FaceCartoon *tempFaceObj = vec_FaceCartoons.at(mostMatchManNum);
					tempFaceObj->facePosition =  FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *tempFaceObj);

					// if this frame's similarity is below the threshold, add its template for future matching
					if(max_same_num_all < threshhold){
						tempFaceObj->faceTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
						base_test1_str += "add Temp \n";
					}

					// once the FcObj is decided, fold any previously unmatched frame templates into it
					if(!vec_formerTemplates.empty()){
						base_test1_str += "use former!! \n";
						for(int i = 0; i < vec_formerTemplates.size(); i++){
							tempFaceObj->faceTemplates.push_back(vec_formerTemplates.at(i));
						}
						// clear the list once it has been used
						vec_formerTemplates.clear();
					}
				}else{	// fewer than 10 frames: use the currently most similar FcObj
					base_test1_str += "<10";
					base_test1_str += "\n";
					changeBackArray[similiestFaceNum_all_man]++;

					FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(similiestFaceNum_all_man);
					//faceCartoonObj->faceTemplate = detectedFaceTemplates[similiestFaceNum_all_cartoon];

					// pick a paired cartoon for this FcObj; in semi-automatic mode it is fixed rather than searched for
					matchCartoonForFcObj(faceCartoonObj, frameCount, base_test1_str);

					double drawTime = (double)cvGetTickCount();
					faceCartoonObj->facePosition =  FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *faceCartoonObj);

					// if this frame's similarity is below the threshold, add its template for future matching
					if(max_same_num_all < threshhold){
						faceCartoonObj->faceTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
						base_test1_str += "add temp \n";
					}
							
					printf("draw time = %g ms\n", ((double)cvGetTickCount() - drawTime) / ((double)cvGetTickFrequency()*1000.));
				}
				situationCartoonNum++;
			}else if((base_test1 > 6 || base_test1 == -1) || (situationCount < 10 ) || (base_test1 <= 6 && situationCartoonNum < 5 && situationCount < 20)){	// scene changed / first face selection / first 10 frames of this scene / same scene but fewer than 10 pasted frames within its first 20
				cout<<"No use!"<<endl;
				if(base_test1 > 6 || base_test1 == -1){	 // scene changed: select the first face
					situationCartoonNum = 0;
				}
					
				if(max_same_num_all > threshhold)	// paste when this frame's face similarity exceeds the threshold
				{
					changeFlag = true;
					changeBackArray[similiestFaceNum_all_man]++;

					FaceCartoon *faceCartoonObj = vec_FaceCartoons.at(similiestFaceNum_all_man);
					//faceCartoonObj->faceTemplate = detectedFaceTemplates[similiestFaceNum_all_cartoon];

					// pick a paired cartoon for this FcObj
					matchCartoonForFcObj(faceCartoonObj, frameCount, base_test1_str);

					double drawTime = (double)cvGetTickCount();
					faceCartoonObj->facePosition =  FaceArray[similiestFaceNum_all_cartoon];
					detect_and_draw(myFrame, *faceCartoonObj);

					printf("draw time = %g ms\n", ((double)cvGetTickCount() - drawTime) / ((double)cvGetTickFrequency()*1000.));
					situationCartoonNum++;
				}else{	// not similar enough: skip pasting this frame
					changeFlag = false;

					// keep this frame's template
					vec_formerTemplates.push_back(detectedFaceTemplates[similiestFaceNum_all_cartoon]);
					base_test1_str += "add to former \n";
				}
			}else if(base_test1 <= 6 && !changeFlag){	// same scene, previous frame not pasted
				base_test1_str += "enter: " + toString(38) + "\n";
			}
			base_test1_str += "changeFlag: " + toString((double)changeFlag)  + "\n";
			base_test1_str += "simi: " + toString(simiAuto) + "\n";

			t = (double)cvGetTickCount() - t;
			totalTime = t / ((double)cvGetTickFrequency()*1000.);
			base_test1_str += "totalTime: " + toString(totalTime) + "\n";
			if(drawStringFlag)
				drawString(myFrame, base_test1_str);

			ttt = (double)cvGetTickCount() - ttt;
			printf("similarity time = %g ms\n", ttt / ((double)cvGetTickFrequency()*1000.));
				
			// show the most similar face
			//Rect faceRegionRect = getROIFromTFacePosition(targetImg, FaceArray[similiestFaceNum]);
			//Mat selected_faceImg(targetImg, faceRegionRect);
			//imshow("Detected face", selected_faceImg);
		}
		base_test1_str = "";	//清空参数表
		FSDK_FreeImage(hTargetImg);
		free(detectedFaceTemplates);

	}
	
	if (theApp->dlg.key_esc) {	// Esc pressed: abort playback
		theApp->dlg.key_esc = false;
		theApp->dlg.KillTimer(1);
		if(saveSelectedTemplates)	// flush and close the output stream
		{
			fout.clear();
			fout.close();
		}
		return;
	}else if (theApp->dlg.key_space){	// space toggles the cartoonization
		theApp->dlg.key_space = false;
		grabFlag = !grabFlag;
	}
	else if (theApp->dlg.key_enter){
		theApp->dlg.KillTimer(1);	// pause
		cout << "Enter pressed" << endl;
		judge_Image = myFrame.clone();
		int grabReturn = grab(originalMat, name, selected_rect);	// Enter pauses and invokes the cutout routine; selected_rect is the chosen region
		
		if(grabReturn == GRAB_OK)	// did the cutout succeed?
		{
			int tempCount = -1;	// number of faces in the current frame
			HImage h_judge_image;
			h_judge_image = loadImageFromMat(judge_Image);
			/*imwrite("judgeImg.jpg", judge_Image);
			FSDK_LoadImageFromFile(&h_judge_image, "judgeImg.jpg");*/
			//FSDK_SaveImageToFile(h_judge_image,"multiddddddddddddd.jpg");
			FSDK_FaceTemplate *judgeFaceTemplates = new FSDK_FaceTemplate[20];
			TFacePosition *judgeFaceArray;
			int genders[20];
			judgeFaceArray = detectUsingLuxand(h_judge_image, tempCount, judgeFaceTemplates, genders);

			int center_x = -1;
			int center_y = -1;
			if(tempCount > 0){
				selectFace = true;
				situationCount = 0;

				// compute the center of the selected region
				center_x = selected_rect.x + selected_rect.width / 2;
				center_y = selected_rect.y + selected_rect.height / 2;
				// the face whose region contains that center is the selected face
				int minDist = INT_MAX, closestFaceNum = -1;
				for (int i = 0; i < tempCount; i++)
				{
					int dist = myDist(center_x, center_y, judgeFaceArray[i].xc, judgeFaceArray[i].yc);
					if(dist < minDist)
					{
						minDist = dist;
						closestFaceNum = i;
					}
				}
				//FaceCartoon faceCartoon;	// a stack object will not do: every iteration would reuse the same address
				FaceCartoon *faceCartoon = new FaceCartoon();	
				faceCartoon->cartoonNumArray = new int[vec_faceTemplates.size()];	// one counter per cartoon
				// zero the array
				for(int i = 0; i < vec_faceTemplates.size(); i++)
				{
					faceCartoon->cartoonNumArray[i] = 0;
				}
				faceCartoon->faceTemplates.push_back(judgeFaceTemplates[closestFaceNum]);	// capture the target face template
				faceCartoon->gender = genders[closestFaceNum];
				// find the most similar person, and the matching cartoon
				int *cartoonNums = findMatchCartoonNum(*faceCartoon);
				for(int i = 0; i< vec_faceTemplates.size(); i++) cout<< cartoonNums[i] << " ";
				cout<<"fwff"<<endl;
				int cartoonNum;
				if(simiAuto){
					theApp->selectCartoonDlg.cartoonNums = cartoonNums;
					theApp->selectCartoonDlg.DoModal();

					cartoonNum = theApp->selectCartoonDlg.getSelectedCartoonNum();
					//cartoonNum = showSelectCartoonWindow(cartoonNums, simiNum);
				}else{
					cartoonNum = cartoonNums[0];	// the most similar one
				}

				faceCartoon->cartoonMatchNum = cartoonNum;
				faceCartoon->cartoonNumArray[cartoonNum]++;

				grabFlag = true;
				vec_FaceCartoons.push_back(faceCartoon);

				if(saveSelectedTemplates)	// optionally persist the template
				{
					fout.write((char *)(&judgeFaceTemplates[closestFaceNum]), sizeof(judgeFaceTemplates[closestFaceNum])); 
				}
					
				// extract the selected face region
				Rect faceRegionRect = getROIFromTFacePosition(judge_Image, judgeFaceArray[closestFaceNum]);
				Mat selected_faceImg(judge_Image, faceRegionRect);
					
				FSDK_FreeImage(h_judge_image);
			}else{
				cout << "No face detected in the current image!!" << endl;
			}
		}
	}

	//imshow("视频播放", myFrame);
	IplImage *originalImg, *cartoonImg;
	originalImg = &IplImage(targetImg);
	cartoonImg = &IplImage(myFrame);
	theApp->dlg.DrawPicToHDC(cartoonImg, IDC_showcartoon);
	theApp->dlg.DrawPicToHDC(originalImg, IDC_showOriginal);
	if(createVideo)
	{
		// write the composited video
		outputVideo.write(myFrame);
	}
	// restart the timer
	if(theApp->dlg.key_enter == true){
		theApp->dlg.key_enter = false;
		theApp->dlg.SetTimer(1,1000/fps,NULL);  	// start
		theApp->dlg.m_playBtn.SetWindowTextA("Pause");
		theApp->dlg.isPlay = true;
	}
	processedFrame = true;
}
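
compareHist1 above is not shown; the scene-change test (change when the value exceeds 6, larger meaning less similar) suggests a chi-square style histogram distance. A hedged sketch of such a helper built on OpenCV's compareHist; the helper name, color space, and bin count are assumptions:

// Hypothetical helper: chi-square distance between hue histograms of two
// frames. Larger return values indicate a bigger change between frames.
double compareHist1_sketch(const Mat& a, const Mat& b)
{
	Mat hsvA, hsvB, histA, histB;
	cvtColor(a, hsvA, CV_BGR2HSV);
	cvtColor(b, hsvB, CV_BGR2HSV);
	int histSize[] = { 50 };            // hue bins (illustrative)
	float hueRange[] = { 0, 180 };
	const float* ranges[] = { hueRange };
	int channels[] = { 0 };
	calcHist(&hsvA, 1, channels, Mat(), histA, 1, histSize, ranges);
	calcHist(&hsvB, 1, channels, Mat(), histB, 1, histSize, ranges);
	normalize(histA, histA, 0, 1, NORM_MINMAX);
	normalize(histB, histB, 0, 1, NORM_MINMAX);
	return compareHist(histA, histB, CV_COMP_CHISQR);
}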
int main(int argc, const char** argv)
{
    // Parse command line arguments
    CommandLineParser parser(argc, argv, params);

    // If help flag is given, print help message and exit
    if (parser.get<bool>("help"))
    {
        parser.printParams();
        return 1;
    }

    string image_file = parser.get<string>("image");
    string video_file = parser.get<string>("video");
    bool use_camera = parser.get<bool>("camera");
    string border = parser.get<string>("border");
    string scratches = parser.get<string>("scratches");

    RetroFilter::Parameters params;
    params.fuzzyBorder = imread(border, 0);
    params.scratches   = imread(scratches, 0);

    if (params.fuzzyBorder.empty())
        cout << "Error: failed to open image to use as a border: " << border << endl;
    if (params.scratches.empty())
        cout << "Error: failed to open image to use for scratches: " << scratches << endl;
    if (params.fuzzyBorder.empty() || params.scratches.empty())
        return 1;

    VideoCapture capture;
    {   
        if (!video_file.empty())
        {
            capture.open(video_file);
            if (!capture.isOpened())
            {
                cout << "Error: failed to open video stream for: " << video_file << endl;
                return 1;
            }
        }
        else if (use_camera)
        {
            capture.open(0);
            if (!capture.isOpened())
            {
                cout << "Error: failed to open video stream for camera #0" << endl;
                return 1;
            }
        }
        else
        {
            cout << "Error: declare a source of images" << endl;
            parser.printParams();
            return 1;
        }
    }

    Mat frame;
    capture >> frame;

    while (frame.empty())
    {
        // empty first frame; let's consider this to be OK and keep reading
        //return 0;
        capture >> frame;
    }

    params.frameSize   = frame.size();
    RetroFilter filter(params);

	VideoWriter captur;
	captur.open("kol.avi", CV_FOURCC('X','V','I','D'), 24.0, frame.size());	// codec, frames per second, frame size
	if(!captur.isOpened()) std::cout<<"not opened";

    long sh=0;
    for(;;)
    {
        Mat retroFrame;
        //TS(filter);
        filter.applyToVideo(frame, retroFrame);
        //TE(filter);

        imshow("Original Movie", frame);
        imshow("Retro Movie", retroFrame);
        char c = (char) waitKey(1);
        if( c == 27 ) // Esc
            break;

        capture >> frame;
        if(frame.empty()) break;	// check before writing: an empty Mat must not go to the writer
		captur << frame;
		if(sh>50) break;
		sh++;
		//std::cout<<sh<<"\n";
    }

    return 0;
}
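
The writer above hard-codes its codec and frame rate; a more robust pattern queries them from the source capture. A minimal sketch (the 25 fps fallback is an assumption for containers that report no rate):

// Derive writer parameters from the capture instead of hard-coding them.
double fps = capture.get(CV_CAP_PROP_FPS);
if (fps <= 0) fps = 25.0;                       // assumed fallback rate
Size size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH),
          (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
VideoWriter writer("kol.avi", CV_FOURCC('X','V','I','D'), fps, size);
if (!writer.isOpened())
    std::cout << "not opened";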
int main(int argc, const char **argv)
{

    VideoCapture cap;
    Tracker objTracker;

    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help")) {
        help();
        return 0;
    }

    if (argc < 2) {	// guard: a file name is required before touching argv[1]
        help();
        cout << "***No input file specified...***\n";
        return -1;
    }
    cap.open(argv[1]);
    if (!cap.isOpened()) {
        help();
        cout << "***Could not access file...***\n";
        return -1;
    }
    Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    //Acquire input size
                  (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    cout << hot_keys;
    bool paused = false;

    Mat frame;
    cap >> frame;

    objTracker.Init(S, Tracker::InitParams());

    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    VideoWriter outputVideo;
    // outputVideo.open("output.mp4" , ex, cap.get(CV_CAP_PROP_FPS), S, true);

    Mat out;
    try {

        while (1) {

            if (!paused && Tracker::g_initTracking) {
                cap >> frame;
                if (frame.empty())
                    break;
            }

            if (!paused) {
                objTracker.ProcessFrame(frame, out);
            }
            imshow("CamShift", out);
            // outputVideo << out;

            char c = (char)waitKey(10);
            if (c == 27)
                break;
            switch (c) {
            case 'b':
                objTracker.ToggleShowBackproject();
                break;
            case 'c':
                // trackObject = 0;
                // histimg = Scalar::all(0);
                break;
            case 'h':
                objTracker.HideControlsGUI();
            //     showHist = !showHist;    
            //     if (!showHist)
            //         destroyWindow("Histogram");
            //     else
            //         namedWindow("Histogram", 1);
                break;  // without this break, 'h' would fall through and toggle pause
            case 'p':
                paused = !paused;
                break;
            case 'r':
                cap.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
                // outputVideo.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
                cap >> frame;
                objTracker.Init(S, Tracker::InitParams());

                break;
            default:
                ;
            }
        }
    }

    catch (const cv::Exception &e) {
        std::cerr << e.what();
        cap.release();
        outputVideo.release();

        return 1;
    }
    cap.release();
    outputVideo.release();

    return 0;
}
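
keys, hot_keys and help() are defined outside this listing. A minimal sketch of plausible definitions in OpenCV's CommandLineParser key-string format; the exact strings are assumptions:

// Hypothetical definitions matching the parser and cout calls above.
const char* keys =
    "{ help h | | print this help message }"
    "{ @input | | input video file }";
const char* hot_keys =
    "Hot keys:\n"
    "\tESC - quit\n\tb - toggle backprojection view\n"
    "\tp - pause\n\tr - rewind and reinitialize tracking\n";
static void help()
{
    cout << "CamShift tracking demo\n" << hot_keys;
}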
int main(int argc, char** argv)
{
	if(argc >= 3)
	{
		VideoCapture inputVideo(argv[1]); // open the video file passed on the command line
		if(!inputVideo.isOpened())  // check if we succeeded
		    return -1; 
		
		// Initialize
	    VideoWriter outputVideo;  // Open the output
	    const string source      = argv[2];                                // the source file name
		const string NAME = source + ".mp4";   // Form the new name with container
	    int ex = inputVideo.get(CV_CAP_PROP_FOURCC);                       // Get Codec Type- Int form
		std::cout << ex << "\n" << (int)inputVideo.get(CV_CAP_PROP_FOURCC) << "\n";
    	Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH),       //Acquire input size
        	          (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT));    
		outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, true);	// true: the frames written later are color
    	char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0};
	    cout << "Input codec type: " << EXT << endl;

        if (!outputVideo.isOpened())
        {
            cout  << "Could not open the output video for write \n";
            return -1;
        }
        
		namedWindow("Result Window", 1);
		
		// Mat declaration
		Mat prev_frame, prev_gray, cur_frame, cur_gray;
        Mat frame_blurred, frameHSV;
        
        // take the first frame
        inputVideo >> prev_frame;
				
        /* manual ball selection */
        MouseParams mp;
        prev_frame.copyTo( mp.ori ); 
        prev_frame.copyTo( mp.img ); 
        setMouseCallback("Result Window", BallSelectFunc, &mp );

		int enterkey = 0;
		while(enterkey != 32 && enterkey != 113)
		{
			enterkey = waitKey(30) & 0xFF;
        	imshow("Result Window", mp.img);
		}
		
		outputVideo.write( mp.img );
		/* Kalman Filter
		   Kalman filter is a prediction-correction filter. It has two stages: predict and correct.
		   In predict stage, the filter uses the states of previous frame to predict the
		   state of current frame. In correct stage, the filter takes in current measurement
		   to "correct" the prediction made in prediction stage. 
		   Here we are using an adaptive Kalman filter to do ball tracking.
		   (noise matrices P, Q change depending on the occlusion index)
		*/
		
		/* Initialization
		   four parameters:  x, y, vx, vy
		   two measurements: mx, my
		   Here we're implementing a constant velocity model.
		   x_t = x_t-1 + vx_t-1;
		   y_t = y_t-1 + vy_t-1;
		   vx_t = vx_t-1;
		   vy_t = vy_t-1;
		   These linear equations can be written as transition matrix A.
		*/
		KalmanFilter KF(4, 2, 0);
		float transMatrixData[16] = {1,0,1,0, 
		                             0,1,0,1,
		                             0,0,1,0,
		                             0,0,0,1};
		                             
		KF.transitionMatrix = Mat(4, 4, CV_32F, transMatrixData).clone();	// clone so the filter owns its data rather than aliasing the stack array
		Mat_<float> measurement(2,1);
		measurement.setTo(Scalar(0));
		
		/* We put the first point in predicted state */
		KF.statePost.at<float>(0) = mp.pt.x;
		KF.statePost.at<float>(1) = mp.pt.y;
		KF.statePost.at<float>(2) = 0;
		KF.statePost.at<float>(3) = 0;
		setIdentity(KF.measurementMatrix);                        // measurement matrix H
		setIdentity(KF.processNoiseCov, Scalar::all(1e-4));       // process noise covariance matrix Q
		setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));   // measurement noise covariance matrix R
		// priori error estimate covariance matrix P'(t)		
		/*
		KF.errorCovPre.at<float>(0)  = 1;
		KF.errorCovPre.at<float>(5)  = 1;
		KF.errorCovPre.at<float>(10) = 1;
		KF.errorCovPre.at<float>(15) = 1;   
		*/
		setIdentity(KF.errorCovPre);                              // priori error estimate covariance matrix P'(t)	
		setIdentity(KF.errorCovPost, Scalar::all(.1));            // posteriori error estimate cov matrix P(t)
	
		/* params related to previous frames */
		Rect    prev_box;
		Point2f prev_motion;
		Point   noFoundStartPt;
        vector<cv::Point2f> prev_ball_centers;
        int noFoundCount = 0;
        
        /* start tracking */		
		setMouseCallback("Result Window", CallBackFunc, &frameHSV);			
        for(int frame_num=1; frame_num < inputVideo.get(CAP_PROP_FRAME_COUNT); ++frame_num)
        {
        	cout << "===FRAME #" << frame_num << "===" << endl;
        	
        	/* get current frame */
            inputVideo >> cur_frame;
            
            // Blur & convert frame to HSV color space
            cv::GaussianBlur(cur_frame, frame_blurred, cv::Size(5, 5), 3.0, 3.0);
            cvtColor(frame_blurred, frameHSV, COLOR_BGR2HSV);
            
            // gray scale current frame
    		cvtColor(prev_frame, prev_gray, CV_BGR2GRAY);            
    		cvtColor(cur_frame, cur_gray, CV_BGR2GRAY);
            
            // mask generation
            Mat mask;
			mask = getMask(frameHSV);

			// Hough Transform
			Mat frame_filtered, frame_filtered_gray;
            cur_frame.copyTo( frame_filtered, mask );
            cv::cvtColor( frame_filtered, frame_filtered_gray, CV_BGR2GRAY );
            vector<cv::Vec3f> circles;
            cv::GaussianBlur(frame_filtered_gray, frame_filtered_gray, cv::Size(5, 5), 3.0, 3.0);
            HoughCircles( frame_filtered_gray, circles, CV_HOUGH_GRADIENT, 1, frame_filtered_gray.rows/8, 120, 18, 5,300);
			
            // contour generation
            vector< vector<cv::Point> > contours_ball;
            cv::findContours(mask, contours_ball, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
            
            Mat result;
            cur_frame.copyTo( result );

			// OpticalFlow
            vector<Point2f> optFlow_ball_centers;
            vector<uchar> featuresFound;
            Mat err;
            TermCriteria termcrit(TermCriteria::COUNT|TermCriteria::EPS, 20, 0.03);
            Size winSize(31, 31);
            if( prev_ball_centers.size() > 0 )
                calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_ball_centers, optFlow_ball_centers, featuresFound, err, winSize, 0, termcrit, 0, 0.001);    
            
            // Kalman Filter: Extract previous point & prediction point
            Point2f statePt   = Point( KF.statePost.at<float>(0), KF.statePost.at<float>(1) );  
            Mat prediction    = KF.predict();  
            Point2f predictPt = Point2f( prediction.at<float>(0), prediction.at<float>(1) );

		    cout << "state:" << statePt << endl;
		    cout << "predict:" << predictPt << endl;
			cout << "prev_motion: " << prev_motion << " sqr: " << prev_motion.x * prev_motion.x + prev_motion.y * prev_motion.y << endl;
            
            // Search current frame for good candidate measurements
            vector<Point2f>   cur_contour_centers;
            vector<cv::Point> best_ball_contour;
            Point2f best_ball_center;
            Rect    best_ball_box;
			bool 	ballFound = false;
			
			// TODO dynamic search range
			int closest_dist = (prev_motion.x * prev_motion.x + prev_motion.y * prev_motion.y) * 15;
	    	if(closest_dist == 0) closest_dist = 10000;
	    	//circle( result, predictPt, sqrt(closest_dist), CV_RGB(255,255,0), 2 );
			
            for (size_t i = 0; i < contours_ball.size(); i++)
			{
			    drawContours(result, contours_ball, i, CV_RGB(255,0,0), 1);  // draw the area
			     
		        cv::Rect bBox;
		        bBox = cv::boundingRect(contours_ball[i]);
			    Point2f center;
			    center.x = bBox.x + bBox.width / 2;
			    center.y = bBox.y + bBox.height / 2;		         

				cur_contour_centers.push_back(center);
				
				// find corresponding optical flow center
				float optFlow_dist = 2500;
				int   best_j = -1;
				for( size_t j=0; j < optFlow_ball_centers.size(); ++j )
				{
		        	float diff_x = center.x - optFlow_ball_centers[j].x;
		        	float diff_y = center.y - optFlow_ball_centers[j].y;
		        	float distance  = diff_x * diff_x + diff_y * diff_y;
					if(distance < optFlow_dist)
					{
						optFlow_dist = distance;	// remember the smallest distance so far
						best_j   = j;
					}
			    }
			    
				/* TODO
				Point2f optPredictPt = center;
				if(best_j != -1)
				{
					Point2f motion = optFlow_ball_centers[best_j] - prev_ball_centers[best_j];
					optPredictPt = center + motion;
					line( result, optPredictPt, center, CV_RGB(190,60,70), 2 );
				}
				*/
					
		        // If we find a contour that includes our prediction point,
		        // it's the best choice then.
				// If no contour contains the prediction point, we search the
				// remaining contours; the one closest to the prediction
				// should be picked.
				if( pointPolygonTest( contours_ball[i], predictPt, false ) >= 0)
				{
					best_ball_contour = contours_ball[i];
					best_ball_center  = center;
					best_ball_box     = bBox;
					ballFound = true;
					break;
				}
				else 
				{
		        	float diff_x = center.x - predictPt.x;
		        	float diff_y = center.y - predictPt.y;
		        	float distance  = diff_x * diff_x + diff_y * diff_y;
					
					//if( bBox.area() < 200 )
					//	continue;
					/*
					stringstream sstr;
					sstr << "(dot= " << dot_product << ")";
					cv::putText(result, sstr.str(),
					cv::Point(center.x + 3, center.y - 3),
					cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(0,255,100), 2);						
					*/
					
					//if(bBox.area() < 250)
					//	continue;
											
					// if distance is close enough
					if( distance < closest_dist )
					{
						best_ball_contour = contours_ball[i];
						best_ball_center  = center;
						best_ball_box     = bBox;		
						closest_dist      = distance;
						ballFound = true;
					}
				}
            }
	
			if(ballFound)
	        {
	        	// calculate the occlusion rate
			    float occ = fabs( (float)best_ball_box.area() / (float)prev_box.area() - 1.0 );
			    if(occ > 1.0) occ = 1.0;
				
				// check threshold
				float threshold = 0.3;
				if(occ < threshold)
				{				
					setIdentity(KF.processNoiseCov, Scalar::all(1.0-occ));  // Q = 1 - occ
					setIdentity(KF.measurementNoiseCov, Scalar::all(occ));  // R = occ    
				}
				else
				{
					setIdentity(KF.processNoiseCov, Scalar::all(0.0));      // Q = 0
					setIdentity(KF.measurementNoiseCov, Scalar::all(1e10)); // R = infinity
					cout << "NON_CONFIDENTIAL_MEASUREMENT\n";
				}
				
				// correction
			    measurement.at<float>(0) = best_ball_center.x;  
				measurement.at<float>(1) = best_ball_center.y;  
				Mat estimated = KF.correct(measurement);
			
				cout << "measured:" << best_ball_center << endl;
				cout << "estimated:" << estimated.at<float>(0) << ", " << estimated.at<float>(1) << endl;
          	
				// remember to update prev parameters
				prev_box     = best_ball_box;
				prev_motion  = best_ball_center - statePt;
				noFoundCount = 0;
		    } 
		    else
		    {
				// TODO
				prev_motion = predictPt - statePt;
				
				if( noFoundCount == 0 )
				{
					noFoundStartPt = statePt;
				}
    		    circle( result, noFoundStartPt, 5, CV_RGB(255,255,255), 2 );
				
				// if Kalman filter failed... we "CORRECT" the frame
				if(noFoundCount > 1)
				{
					closest_dist = 1e8;
				    for( size_t i = 0; i < circles.size(); i++ )
				    {                
				        Point center_circle(cvRound(circles[i][0]), cvRound(circles[i][1]));
				        int radius_circle = cvRound(circles[i][2]);
						if( radius_circle < 6 )
							continue;
						/*	
						cv::Rect bBox;
						bBox = cv::boundingRect(circles[i]);
						Point center;
						center.x = bBox.x + bBox.width / 2;
						center.y = bBox.y + bBox.height / 2;		         
			    		*/
				    	int diff_x = center_circle.x - noFoundStartPt.x;
				    	int diff_y = center_circle.y - noFoundStartPt.y;
				    	int distance  = diff_x * diff_x + diff_y * diff_y;

						if( distance < closest_dist)
						{
							closest_dist = distance;
							best_ball_center = center_circle;
							//best_ball_box    = bBox;
							ballFound = true;						
						}
				    }
				    
				    if(ballFound)
				    {
	    			    //measurement.at<float>(0) = best_ball_center.x;  
						//measurement.at<float>(1) = best_ball_center.y;  
	    				//Mat estimated = KF.correct(measurement);	
						KF.statePost.at<float>(0) = best_ball_center.x;
						KF.statePost.at<float>(1) = best_ball_center.y;
						KF.statePost.at<float>(2) = 0;
						KF.statePost.at<float>(3) = 0;

						prev_box     = best_ball_box;
						prev_motion  = Point2f(0, 0);
				    	noFoundCount = 0;
				    }
				    else {
				    	cout << "UNABLE TO CORRECT..." << endl;
				    }
				}
				noFoundCount++;
				cout << "NO FOUND: " << noFoundCount << endl;
		    }
		    
		    // rendering result
			line( result, statePt, predictPt, CV_RGB(255,0,255), 2 );	
	    	circle( result, predictPt, 2, CV_RGB(255,0,255), 2 );	         
		    circle( result, best_ball_center, 2, CV_RGB(255,255,255), 2 );
		    rectangle( result, best_ball_box, CV_RGB(0,255,0), 2 );

			// Optical Flow   
            /*
            for (size_t i = 0; i < optFlow_ball_centers.size(); i++)
			{
				line( result, prev_ball_centers[i], optFlow_ball_centers[i], CV_RGB(120,70,255), 2 );
		    	circle( result, optFlow_ball_centers[i], 2, CV_RGB(120,70,255), 2 );
            }			   
			*/
			
			// Hough
            /*
            for( size_t circle_i = 0; circle_i < circles.size(); circle_i++ )
            {                
                Point center(cvRound(circles[circle_i][0]), cvRound(circles[circle_i][1]));
                int radius = cvRound(circles[circle_i][2]);
                circle( result, center, radius, Scalar(12,12,255), 2 );
            }			
			*/
			
			prev_ball_centers = cur_contour_centers;
			
		    imshow("Result Window", result);
 			outputVideo.write( result );
          
            /* UPDATE FRAME */
            cur_frame.copyTo( prev_frame );
            
            /* KEY INPUTS */
            int keynum = waitKey(30) & 0xFF;
            if(keynum == 113)      // press q
            	break;
            else if(keynum == 32)  // press space
			{
				keynum = 0;
				while(keynum != 32 && keynum != 113)
					keynum = waitKey(30) & 0xFF;
				if(keynum == 113) 
					break;
			}
        }
        inputVideo.release();
        outputVideo.release();
    }
	return 0;
}
Exemple #26
0
int main(int argc, const char* argv[])
{
    CommandLineParser cmd(argc, argv,
        "{ v video      |           | Input video }"
        "{ o output     |           | Output video }"
        "{ s scale      | 4         | Scale factor }"
        "{ i iterations | 180       | Iteration count }"
        "{ t temporal   | 4         | Radius of the temporal search area }"
        "{ f flow       | farneback | Optical flow algorithm (farneback, simple, tvl1, brox, pyrlk) }"
        "{ g            | false     | CPU as default device, cuda for CUDA }"
        "{ h help       | false     | Print help message }"
    );

    if (cmd.get<bool>("help"))
    {
        cout << "This sample demonstrates Super Resolution algorithms for video sequence" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    const string inputVideoName = cmd.get<string>("video");
    const string outputVideoName = cmd.get<string>("output");
    const int scale = cmd.get<int>("scale");
    const int iterations = cmd.get<int>("iterations");
    const int temporalAreaRadius = cmd.get<int>("temporal");
    const string optFlow = cmd.get<string>("flow");
    string gpuOption = cmd.get<string>("gpu");

    std::transform(gpuOption.begin(), gpuOption.end(), gpuOption.begin(), ::tolower);

    bool useCuda = gpuOption.compare("cuda") == 0;
    Ptr<SuperResolution> superRes;

    if (useCuda)
        superRes = createSuperResolution_BTVL1_CUDA();
    else
        superRes = createSuperResolution_BTVL1();

    Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);

    if (of.empty())
        return EXIT_FAILURE;
    superRes->set("opticalFlow", of);

    superRes->set("scale", scale);
    superRes->set("iterations", iterations);
    superRes->set("temporalAreaRadius", temporalAreaRadius);

    Ptr<FrameSource> frameSource;
    if (useCuda)
    {
        // Try to use gpu Video Decoding
        try
        {
            frameSource = createFrameSource_Video_CUDA(inputVideoName);
            Mat frame;
            frameSource->nextFrame(frame);
        }
        catch (const cv::Exception&)
        {
            frameSource.release();
        }
    }
    if (!frameSource)
        frameSource = createFrameSource_Video(inputVideoName);

    // skip first frame, it is usually corrupted
    {
        Mat frame;
        frameSource->nextFrame(frame);
        cout << "Input           : " << inputVideoName << " " << frame.size() << endl;
        cout << "Scale factor    : " << scale << endl;
        cout << "Iterations      : " << iterations << endl;
        cout << "Temporal radius : " << temporalAreaRadius << endl;
        cout << "Optical Flow    : " << optFlow << endl;
        cout << "Mode            : " << (useCuda ? "CUDA" : "CPU") << endl;
    }

    superRes->setInput(frameSource);

    VideoWriter writer;

    for (int i = 0;; ++i)
    {
        cout << '[' << setw(3) << i << "] : ";
        Mat result;

        MEASURE_TIME(superRes->nextFrame(result));

        if (result.empty())
            break;

        imshow("Super Resolution", result);

        if (waitKey(1000) > 0)
            break;

        if (!outputVideoName.empty())
        {
            if (!writer.isOpened())
                writer.open(outputVideoName, VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size());
            writer << result;
        }
    }

    return 0;
}
Exemple #27
0
	~GenericClassnameOneTracker9000()
	{
		tracking_recorder.release();
		logger.close();
	}
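Only the destructor of GenericClassnameOneTracker9000 survives in this listing; it releases a VideoWriter and closes a log stream. A hedged sketch of the enclosing class under that assumption; everything beyond the two members used above is hypothetical:

#include <fstream>
#include <string>
class GenericClassnameOneTracker9000
{
	VideoWriter tracking_recorder;	// released in the destructor above
	std::ofstream logger;			// closed in the destructor above
public:
	// Hypothetical constructor pairing with the destructor: both resources
	// are acquired up front so teardown can release them unconditionally.
	GenericClassnameOneTracker9000(const std::string& videoPath,
	                               const std::string& logPath,
	                               Size frameSize, double fps)
		: logger(logPath.c_str())
	{
		tracking_recorder.open(videoPath, CV_FOURCC('M','J','P','G'), fps, frameSize);
	}
	~GenericClassnameOneTracker9000()
	{
		tracking_recorder.release();
		logger.close();
	}
};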
int main( int argc, const char** argv )
{
    CvCapture* capture = 0;
	VideoWriter vw;    
    Mat frame, frameCopy, image;
    Mat wframe;
    template1= imread("pupil_template1.png");
    const string scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const string cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const string nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    const string tryFlipOpt = "--try-flip";
    size_t tryFlipOptLen = tryFlipOpt.length();
    string inputName;
    bool tryflip = false;
	vw.open("b.avi",CV_FOURCC('D','I','V','X'),10,Size(1280,720));
    help();

    CascadeClassifier cascade, nestedCascade;
    double scale = 1;

    for( int i = 1; i < argc; i++ )
    {
        cout << "Processing " << i << " " <<  argv[i] << endl;
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
        {
            cascadeName.assign( argv[i] + cascadeOptLen );
            cout << "  from which we have cascadeName= " << cascadeName << endl;
        }
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
                scale = 1;
            cout << " from which we read scale = " << scale << endl;
        }
        else if( tryFlipOpt.compare( 0, tryFlipOptLen, argv[i], tryFlipOptLen ) == 0 )
        {
            tryflip = true;
            cout << " will try to flip image horizontally to detect assymetric objects\n";
        }
        else if( argv[i][0] == '-' )
        {
            cerr << "WARNING: Unknown option %s" << argv[i] << endl;
        }
        else
            inputName.assign( argv[i] );
    }

    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        help();
        return -1;
    }

    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
    {
        capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
        int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
        if(!capture) cout << "Capture from CAM " <<  c << " didn't work" << endl;
    }
    else if( inputName.size() )
    {
        image = imread( inputName, 1 );
        if( image.empty() )
        {
            capture = cvCaptureFromAVI( inputName.c_str() );
            if(!capture) cout << "Capture from AVI didn't work" << endl;
        }
    }
    else
    {
        image = imread( "lena.jpg", 1 );
        if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
    }

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        cout << "In capture ..." << endl;
        for(;;)
        {
            IplImage* iplImg = cvQueryFrame( capture );
            frame = cv::cvarrToMat(iplImg);
            if( frame.empty() )
                break;
            if( iplImg->origin == IPL_ORIGIN_TL )
                frame.copyTo( frameCopy );
            else
                flip( frame, frameCopy, 0 );

            wframe = detectAndDraw( frameCopy, cascade, nestedCascade, scale, tryflip  );
   	         
       vw << wframe;
			char c = (char)waitKey(10);

            if( c == 'e' )
                goto _cleanup_;	// release the capture and writer before exiting

        }

        waitKey(0);

_cleanup_:
        cvReleaseCapture( &capture );
        vw.release();
    }
    else
    {
        cout << "In image read" << endl;
        if( !image.empty() )
        {
            wframe = detectAndDraw( image, cascade, nestedCascade, scale, tryflip  );
            waitKey(0);
        }
        else if( !inputName.empty() )
        {
            /* assume it is a text file containing the
            list of the image filenames to be processed - one per line */
            FILE* f = fopen( inputName.c_str(), "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf), c;
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    cout << "file " << buf << endl;
                    image = imread( buf, 1 );
                    if( !image.empty() )
                    {
                        wframe = detectAndDraw( image, cascade, nestedCascade, scale, tryflip  );
                        c = waitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                    }
                    else
                    {
                        cerr << "Aw snap, couldn't read image " << buf << endl;
                    }
                }
                fclose(f);
            }
        }
    }

    cvDestroyWindow("result");
	
    return 0;
}
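The capture path above uses the legacy C API (CvCapture, cvQueryFrame, cvNamedWindow), which was removed in OpenCV 4. A minimal sketch of the equivalent loop with the C++ API, assuming detectAndDraw keeps the signature used above:

// C++-API equivalent of the legacy capture loop (no CvCapture/IplImage).
VideoCapture cap;
if (inputName.empty() || (inputName.size() == 1 && isdigit(inputName[0])))
    cap.open(inputName.empty() ? 0 : inputName[0] - '0');  // camera index
else
    cap.open(inputName);                                   // video file
namedWindow("result", 1);
Mat capFrame;
for (;;)
{
    cap >> capFrame;                 // VideoCapture handles frame origin itself
    if (capFrame.empty())
        break;
    Mat drawn = detectAndDraw(capFrame, cascade, nestedCascade, scale, tryflip);
    vw << drawn;
    if ((char)waitKey(10) == 'e')
        break;
}
cap.release();                       // also released by the destructor
vw.release();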
Exemple #29
0
int main(int argc, char **argv){
	VideoWriter outputVideo;
	Size S = Size(2*extended_resolution,extended_resolution);
	outputVideo.open("test.avi",  CV_FOURCC('M','J','P','G'), 10, S, true);
    if (!outputVideo.isOpened())
    {
        cout  << "Could not open the output video for write: " << endl;
        return 0;
    }
    unsigned char buffer[1024];
    /* Initiate a timer for resetting people count at midnight */
    atomic<bool> timer_keeps_running {true} ;
	if(reset_count_at_midnight) {
		int initial_wait_time = compute_remaining_time_of_today();
		thread( reset_occupancy_count, initial_wait_time, std::ref(timer_keeps_running) ).detach() ; 
	}
	
	XMPPWrapper_.report_updated_people_count(-200, people_inside, people_inside - person_count_last); //logging + reporting initial count
    if(ENABLE_ACTUATION)
    {
    	signal(SIGUSR1, update_people_count_handler);
    }
    if(LIVE == false){
		cout << "reading from file" << endl;
		read_from_file(input_file_name,outputVideo);
		return 0;
	}
	//GridEyeReader sensorReader;
  	signal(SIGINT, sigintHandler);
 	map<int,int**> frames;
    print_time();
    vector<Person> people;
    sensorReader.Init();
    auto begin = chrono::high_resolution_clock::now();    
    OccupancyCounter_.set_people_inside(0);
    person_count_last = 0;
    while (!exitRequested)
    {
    	auto end = chrono::high_resolution_clock::now();
        auto dur = end - begin;
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(dur).count();
        //cout << "ms passed is " << ms << endl;
        //usleep(std::max(0,(int)(100 - ms)) * 1000);
        if(ms > 100)
            cout << "time passed " << ms << endl;
        int f = sensorReader.ReadFrame(buffer);
        //cout << "sensere reader returns " << f <<"bytes" << endl;
        begin = chrono::high_resolution_clock::now();
       if( f > 130 )
       {
            //cout << "sensor reads  " << f << "bytes "<<endl;
            frames = sensorReader.interpret_data(buffer, f);
            for(map<int,int**>::iterator it=frames.begin();it!=frames.end();it++){
				Mat im = OccupancyCounter_.convert_to_Mat(it->second);
				Mat extended_im = OccupancyCounter_.resize_frame(im,it->first);
				if(blobDetection)
					OccupancyCounter_.blob_detect(extended_im);
				if(contourDetection)
					OccupancyCounter_.process_frame(extended_im,it->first,people,outputVideo,im);
            }
            frames.clear();
        }
        people_inside = OccupancyCounter_.get_people_inside();
        if(ENABLE_XMPP_REPORTING)
            if(people_inside != person_count_last)
		      XMPPWrapper_.occupancyChange(people_inside, people_inside - person_count_last);
		person_count_last = people_inside;
    }
    signal(SIGINT, SIG_DFL);
    retval =  EXIT_SUCCESS;

    // sensorReader is destroyed automatically when main returns; calling
    // its destructor explicitly would run it twice.
    return retval;
}
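compute_remaining_time_of_today and reset_occupancy_count are not shown. From the call site, the worker presumably sleeps out the rest of the day, zeroes the count at midnight, and then repeats daily; a hedged sketch under the assumption that the initial wait is given in seconds:

// Hypothetical midnight-reset worker matching the thread(...).detach() call
// above: wait out the rest of today, then reset the occupancy count every
// 24 hours while the flag stays true. Requires <thread>, <chrono>, <atomic>.
void reset_occupancy_count(int initial_wait_seconds, std::atomic<bool>& keep_running)
{
    std::this_thread::sleep_for(std::chrono::seconds(initial_wait_seconds));
    while (keep_running)
    {
        OccupancyCounter_.set_people_inside(0);   // midnight: occupancy back to zero
        std::this_thread::sleep_for(std::chrono::hours(24));
    }
}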
Exemple #30
0
int main(int argc, char ** argv)
{
    if (!parseArguments(argc, argv))
    {
        showHelp(argv[0], false);
        return -1;
    }
    VideoCapture cap(GlobalArgs.deviceName);
    if (!cap.isOpened())
    {
        cout << "Cannot find device " << GlobalArgs.deviceName << endl;
        showHelp(argv[0], false);
        return -1;
    }

    VideoWriter videoWriter;
    Mat frame;
    FocusState state = createInitialState();
    bool focus = true;
    bool lastSucceeded = true;
    namedWindow(windowOriginal, 1);

    // Get settings:
    if (GlobalArgs.verbose)
    {
        if ((cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == 0)
                || (cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE) == -1))
        {
            // Some VideoCapture implementations can return -1, 0.
            cout << "This is not GPHOTO2 device." << endl;
            return -2;
        }
        cout << "List of camera settings: " << endl
                << (const char *) (intptr_t) cap.get(CAP_PROP_GPHOTO2_WIDGET_ENUMERATE)
                << endl;
        cap.set(CAP_PROP_GPHOTO2_COLLECT_MSGS, true);
    }

    cap.set(CAP_PROP_GPHOTO2_PREVIEW, true);
    cap.set(CAP_PROP_VIEWFINDER, true);
    cap >> frame; // To check PREVIEW output Size.
    if (!GlobalArgs.output.empty())
    {
        Size S = Size((int) cap.get(CAP_PROP_FRAME_WIDTH), (int) cap.get(CAP_PROP_FRAME_HEIGHT));
        int fourCC = CV_FOURCC('M', 'J', 'P', 'G');
        videoWriter.open(GlobalArgs.output, fourCC, GlobalArgs.fps, S, true);
        if (!videoWriter.isOpened())
        {
            cerr << "Cannot open output file " << GlobalArgs.output << endl;
            showHelp(argv[0], false);
            return -1;
        }
    }
    showHelp(argv[0], true); // welcome msg

    if (GlobalArgs.minimumFocusStep == 0)
    {
        state.minFocusStep = findMinFocusStep(cap, FOCUS_STEP / 16, -FOCUS_DIRECTION_INFTY);
    }
    else
    {
        state.minFocusStep = GlobalArgs.minimumFocusStep;
    }
    focusDriveEnd(cap, -FOCUS_DIRECTION_INFTY); // Start with closest

    char key = 0;
    while (key != 'q' && key != 27 /*ESC*/)
    {
        cap >> frame;
        if (frame.empty())
        {
            break;
        }
        if (!GlobalArgs.output.empty())
        {
            videoWriter << frame;
        }

        if (focus && !GlobalArgs.measure)
        {
            int stepToCorrect = correctFocus(lastSucceeded, state, rateFrame(frame));
            lastSucceeded = cap.set(CAP_PROP_ZOOM,
                    max(stepToCorrect, state.minFocusStep) * state.direction);
            if ((!lastSucceeded) || (stepToCorrect < state.minFocusStep))
            {
                if (--GlobalArgs.breakLimit <= 0)
                {
                    focus = false;
                    state.step = state.minFocusStep * 4;
                    cout << "In focus, you can press 'f' to improve with small step, "
                            "or 'r' to reset." << endl;
                }
            }
            else
            {
                GlobalArgs.breakLimit = DEFAULT_BREAK_LIMIT;
            }
        }
        else if (GlobalArgs.measure)
        {
            double rate = rateFrame(frame);
            if (!cap.set(CAP_PROP_ZOOM, state.minFocusStep))
            {
                if (--GlobalArgs.breakLimit <= 0)
                {
                    break;
                }
            }
            else
            {
                cout << rate << endl;
            }
        }

        if ((focus || GlobalArgs.measure) && GlobalArgs.verbose)
        {
            cout << "STATE\t" << state << endl;
            cout << "Output from camera: " << endl
                    << (const char *) (intptr_t) cap.get(CAP_PROP_GPHOTO2_FLUSH_MSGS) << endl;
        }

        imshow(windowOriginal, frame);
        switch (key = static_cast<char>(waitKey(30)))
        {
            case 'k': // focus out
                cap.set(CAP_PROP_ZOOM, 100);
                break;
            case 'j': // focus in
                cap.set(CAP_PROP_ZOOM, -100);
                break;
            case ',': // Drive to closest
                focusDriveEnd(cap, -FOCUS_DIRECTION_INFTY);
                break;
            case '.': // Drive to infinity
                focusDriveEnd(cap, FOCUS_DIRECTION_INFTY);
                break;
            case 'r': // reset focus state
                focus = true;
                state = createInitialState();
                break;
            case 'f': // focus switch on/off
                focus ^= true;
                break;
        }
    }

    if (GlobalArgs.verbose)
    {
        cout << "Captured " << (int) cap.get(CAP_PROP_FRAME_COUNT) << " frames"
                << endl << "in " << (int) (cap.get(CAP_PROP_POS_MSEC) / 1e2)
                << " seconds," << endl << "at avg speed "
                << (cap.get(CAP_PROP_FPS)) << " fps." << endl;
    }

    return 0;
}
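rateFrame, the focus-quality metric that drives correctFocus above, is not shown. A common choice for contrast-detection autofocus is the variance of the Laplacian, where sharper frames score higher; a minimal sketch under that assumption (the real metric may differ):

// Hypothetical sharpness metric: variance of the Laplacian response.
// In-focus frames carry more high-frequency detail, hence larger variance.
static double rateFrame_sketch(const Mat& frame)
{
    Mat gray, lap;
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    Laplacian(gray, lap, CV_64F);
    Scalar mu, sigma;
    meanStdDev(lap, mu, sigma);
    return sigma[0] * sigma[0];   // variance = stddev squared
}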