Example #1
void colorKeyingHSV(const string& videoPath){
	// load the video
	VideoCapture video;
	video.open(videoPath);
	if (!video.isOpened()){
		return;
	}
	int width = (int)video.get(CV_CAP_PROP_FRAME_WIDTH);
	int height = (int)video.get(CV_CAP_PROP_FRAME_HEIGHT);

	namedWindow("Video");
	namedWindow("Hue");
	createTrackbar("Lower", "Hue", 0, 180);
	setTrackbarPos("Lower", "Hue", lowerHue);
	createTrackbar("Upper", "Hue", 0, 180);
	setTrackbarPos("Upper", "Hue", upperHue);

	namedWindow("Saturation");
	createTrackbar("Select", "Saturation", 0, 255);
	setTrackbarPos("Select", "Saturation", threshSaturation);
	namedWindow("Maske");

	Mat hueFrame(height, width, CV_8UC1);
	Mat saturationFrame(height, width, CV_8UC1);
	Mat mask(height, width, CV_8UC1);

	int frameNumber = 0;
	while(true){
		Mat videoFrame;
		if (video.read(videoFrame) == false){
			break;
		}

		// convert to HSV color space
		Mat hsvFrame;
		cvtColor(videoFrame, hsvFrame, CV_BGR2HSV);	

		// fetch the thresholds from the trackbars
		int threshSaturation = getTrackbarPos("Select", "Saturation");
		int lowerThreshHue = getTrackbarPos("Lower", "Hue");
		int upperThreshHue = getTrackbarPos("Upper", "Hue");

		// analyze the pixels
		int sumx = 0;
		int sumy = 0;
		int countWhites = 0;
		for(int x = 0; x < videoFrame.cols; x++){
			for(int y = 0; y < videoFrame.rows; y++){
				Vec3b hsvPixel = hsvFrame.at<Vec3b>(y,x);
				int hue = hsvPixel[0];
				int saturation = hsvPixel[1];

				// masking and centroid accumulation
				if (saturation > threshSaturation && hue > lowerThreshHue && hue < upperThreshHue){
					mask.at<uchar>(y,x) = 255;
					sumx += x;
					sumy += y;
					countWhites++;
				}
				else{
					mask.at<uchar>(y,x) = 0;
				}

				// the following steps are not strictly necessary; they only serve as visualization
				if (hue > lowerThreshHue && hue < upperThreshHue){
					hueFrame.at<uchar>(y,x) = 255;
				}
				else{
					hueFrame.at<uchar>(y,x) = 0;
				}
				if (saturation > threshSaturation){
					saturationFrame.at<uchar>(y,x) = 255;
				}
				else{
					saturationFrame.at<uchar>(y,x) = 0;
				}
			}
		}
	
		// compute the centroid
		if (countWhites > 0){
			Point center(sumx/countWhites, sumy/countWhites);
			cross(videoFrame, center, crossLength, colorGreen);
		}
		
		imshow("Hue", hueFrame);
		imshow("Saturation", saturationFrame);
		imshow("Maske", mask);
		imshow("Video", videoFrame);
		waitKey(100);
	}
}
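The function above references several globals (lowerHue, upperHue, threshSaturation, crossLength, colorGreen) and a cross() drawing helper that the snippet does not show. A minimal sketch of plausible definitions and a driver, assuming those names; the threshold values and the video path are placeholders:

// Assumed globals referenced by colorKeyingHSV; the values are illustrative.
int lowerHue = 50;          // lower hue bound (OpenCV hue range is 0..180)
int upperHue = 70;          // upper hue bound
int threshSaturation = 100; // minimum saturation for a keyed pixel
int crossLength = 10;       // half-length of the centroid marker
Scalar colorGreen(0, 255, 0);

// Simple marker helper matching the cross(...) call above; in a real
// source file it must be declared before colorKeyingHSV.
void cross(Mat& img, Point center, int len, const Scalar& color){
	line(img, center - Point(len, 0), center + Point(len, 0), color, 2);
	line(img, center - Point(0, len), center + Point(0, len), color, 2);
}

int main(){
	colorKeyingHSV("input.mp4"); // placeholder path
	return 0;
}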
Example #2
int main(int argc, char * argv[]){
	int patcharray[6]={15,20,25,30,35};
	int minwind[3]={5,10,15};
	FILE *pfilezp;//=fopen("Record.txt","w");
	FILE *objectf;
	FILE *tablef;
	FILE *patchf;
	time_t start,end;
	double wholecost;
	struct tm *ptr;
	int retry;
	int startFrame=0;
	bool nopoint=true;//toggles whether the tracked points are shown
	bool drawDec=false;//whether to draw the detection boxes
	bool cameraAgain=false;
	bool breaknow=false;//flag used to break out of the outer loop
	bool play=false;//whether to switch to play mode
	char *test[]={
		"-p parameters.yml -s car.mpg -b car.txt",
		"-p ../parameters.yml -s ../datasets/01_david/david.mpg -b ../datasets/01_david/init.txt",
		"-p ../parameters.yml -s ../datasets/02_jumping/jumping.mpg -b ../datasets/02_jumping/init.txt",
		"-p ../parameters.yml -s ../datasets/03_pedestrian1/pedestrian1.mpg -b ../datasets/03_pedestrian1/init.txt",
		"-p ../parameters.yml -s ../datasets/04_pedestrian2/pedestrian2.mpg -b ../datasets/04_pedestrian2/init.txt",
		"-p ../parameters.yml -s ../datasets/05_pedestrian3/pedestrian3.mpg -b ../datasets/05_pedestrian3/init.txt",
		"-p ../parameters.yml -s ../datasets/06_car/car.mpg -b ../datasets/06_car/init.txt",
		"-p ../parameters.yml -s ../datasets/07_motocross/motocross.mpg -b ../datasets/07_motocross/init.txt",
		//"-p ../parameters.yml -s ../datasets/08_volkswagen/volkswagen.mpg -b ../datasets/08_volkswagen/init.txt",
		"-p ../parameters.yml -s ../datasets/09_carchase/carchase.mpg -b ../datasets/09_carchase/init.txt",
		"-p ../parameters.yml -s ../datasets/10_panda/panda.mpg -b ../datasets/10_panda/init.txt",
		"-p ../parameters.yml -s ../datasets/11_test/test2.avi"};
	char *testt[]={"-p parameters.yml -im data"};//,"-p parameters.yml -s car.mpg -b init1.txt",
		//"-p parameters.yml -s test.avi",
	//	"-p parameters.yml -s motocross.mpg -b init2.txt"};
	for(int i=0;i<1;i++){
		for (int flag=0;flag<1;flag++)
		//for (int pi=0;pi<15;pi++)		
		{
			RNG rng((int64)-1);//seed the random number generator
			double costsum[7]={0.0,0.0,0.0,0.0,0.0,0.0,0.0};
			if(flag==1)
				int tempp=1;
			isImage=false;
			breaknow=false;
			retry=-1;
			patchf=fopen("patchgpu.txt", "at");
			pfilezp=fopen("Record.txt","at");
			tablef=fopen("tableout.txt","at");
			objectf=fopen("objectf.txt", "at");			
			drawing_box = false;
			gotBB = false;
			tl = true;
			rep = false;
			fromfile=false;
			start=time(NULL); ptr=localtime(&start);
			printf("%s", asctime(ptr));
			fprintf(pfilezp, "%s", asctime(ptr));
			wholecost = (double)getTickCount();
			VideoCapture capture;
			//CvCapture* capture;
			capture.open(1);
			//capture = cvCaptureFromCAM( CV_CAP_ANY);
			FileStorage fs;
			//Read options
			string s = test[flag];
			string del = " ";
			char test2[10][100];
			test2[4][0]='0';//strangely, test2 keeps values from the previous iteration, even though it is declared inside the loop and should be a fresh local variable each time
			vector<string> strs = splitEx(s, del);
			for ( unsigned int i = 0; i < strs.size(); i++)
			{  
				//  cout << strs[i].c_str() << endl;
				//	test2[i]=strs[i].c_str();
				strcpy(test2[i],strs[i].c_str());
				//cout<<test2[i]<<endl;
			}
			//int tp=strs.size();
			char *p[10];
			char **test3;//this part is confusing...
			for(int i=0;i<10;i++)
				p[i]=test2[i];
			test3=p; 	

			read_options(10,test3,capture,fs);

//			video = string(argv[1]);//target video; in the experiments these three lines are the input parameters
//			capture.open(video);
//			readBB(argv[2]);//target bounding box


			// read_options(argc,argv,capture,fs);
			if(startFrame>0)//the r key was pressed, so the bounding box must be reselected manually
			{				
				box = Rect( 0, 0, 0, 0 );
				gotBB=false;
			}
			//   read_options(argc,argv,capture,fs);
			//Init camera
			if (!capture.isOpened()&&!isImage)//the video cannot be opened and the input is not an image sequence
			{
				cout << "capture device failed to open!" << endl;
				return 1;
			}
			//Register mouse callback to draw the bounding box
			cvNamedWindow("TLD",CV_WINDOW_AUTOSIZE);
			cvSetMouseCallback("TLD", mouseHandler, NULL);
			//TLD framework
			TLD tld;
			//Read parameters file
			tld.read(fs.getFirstTopLevelNode());
//			tld.patch_size=atoi(argv[3]);
//			tld.min_win=atoi(argv[4]);	
			Mat frame;
			Mat last_gray;
			Mat first;
			if(fromCa)
				fromfile=false;
			fromCa=false;
			if (fromfile){
				if(!isImage){
					//	capture >> frame;
					totalFrameNumber = capture.get(CV_CAP_PROP_FRAME_COUNT);  
					cout<<"整个视频共"<<totalFrameNumber<<"帧"<<endl;
					//	capture.set( CV_CAP_PROP_POS_FRAMES,0); 似乎没有用
					for(int i=0;i<=startFrame;i++){
						capture.read(frame);}
					cvtColor(frame, last_gray, CV_RGB2GRAY);
					frame.copyTo(first);
				}
				else{
					totalFrameNumber = listCount;  
					cout<<"整个图像序列共"<<listCount<<"帧"<<endl;
					//	capture.set( CV_CAP_PROP_POS_FRAMES,0); 似乎没有用
					frame=imread(imageList[startFrame+2]);
					cvtColor(frame, last_gray, CV_RGB2GRAY);
					frame.copyTo(first);
				}

			}else{
				capture.set(CV_CAP_PROP_FRAME_WIDTH,340);
				capture.set(CV_CAP_PROP_FRAME_HEIGHT,240);
			}

			///Initialization
GETBOUNDINGBOX:
			while(!gotBB)
			{
				if (!fromfile){
					capture >> frame;
				}
				else
					first.copyTo(frame);
				cvtColor(frame, last_gray, CV_RGB2GRAY);
				drawBox(frame,box);
				imshow("TLD", frame);
				int cw=cvWaitKey(1);
				if (cw == 'q')
					return 0;
				if(cw=='p')
				{
					play=true;box=Rect( 0, 0, 15, 15 );
					break;
				}
			}
			if (min(box.width,box.height)<(int)fs.getFirstTopLevelNode()["min_win"]){
				cout << "Bounding box too small, try again." << endl;
				gotBB = false;
				goto GETBOUNDINGBOX;
			}
			//Remove callback
			cvSetMouseCallback( "TLD", NULL, NULL );
			printf("Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);

			//Output file
			FILE  *bb_file = fopen("bounding_boxes.txt","w");
			//fprintf(tablef,"%s\n",test2[3]);
			//TLD initialization
			tld.initNcc();
			tld.init(last_gray,box,bb_file);
			tld.initGpu(last_gray);
			///Run-time
			Mat current_gray;
			BoundingBox pbox;
			vector<Point2f> pts1;
			vector<Point2f> pts2;
			bool status=true;
			int frames = 1;
			int detections = 1;
			int flagg=startFrame;//records the index of the current frame

			// pfilezp=fopen("Record.txt","w");
REPEAT:     
			//	capture.set( CV_CAP_PROP_POS_FRAMES,startFrame);  			
			while((!isImage&&capture.read(frame))||(isImage)){
				if(isImage){					
					frame=imread(imageList[startFrame++]);
					if(startFrame>listCount-1){
						box=Rect( 0, 0, 0, 0 );
						break;}
				}
				
				flagg++;
				double cost = (double)getTickCount();
				//get frame
				cvtColor(frame, current_gray, CV_RGB2GRAY);
				//Process Frame  				
				if(!play)
					tld.processFrame(last_gray,current_gray,pts1,pts2,pbox,status,tl,bb_file,tablef,costsum,objectf);  
				//Draw Points
				if (status&&!play){
					if(!nopoint){
						drawPoints(frame,pts1);
						drawPoints(frame,pts2,Scalar(0,255,0));
					}
					drawBox(frame,pbox,Scalar(255,255,255),2);
					detections++;
				}
				if(drawDec){
				//	for(int j=0;j<tld.dt.bb.size();j++)
				//		drawBox(frame,tld.grid[tld.dt.bb[j]]);
					for(int j=0;j<tld.dbb.size();j++)//drawn for the figures in the paper: dbb is displayed because the outlier algorithm stores its results into dbb
						drawBox(frame,tld.dbb[j]);
				}
				//Display
				imshow("TLD", frame);
				//swap points and images
				swap(last_gray,current_gray);
				pts1.clear();
				pts2.clear();
				frames++;
				if(frames==tld.pausenum) system("pause");				
				printf("Detection rate: %d/%d\n",detections,frames);
				if(frames==totalFrameNumber)
					tld.islast=true;
				cost=getTickCount()-cost;
				printf("--------------------------------process cost %gms\n", cost*1000/getTickFrequency());
				int c = waitKey(1);
				//int c= 'm';
				if(cameraAgain)
					c='c';//if the r key was pressed in camera mode, control comes back here
				switch(c){
				case 'n'://test the next video
					{				
					//	retry==1;
						breaknow=true; 
						gotBB=false;
						box = Rect(0, 0, 0, 0);
						break;
					}
				case 'q':{
					tld.endGpu();
					return 0;}
				case 'r'://manually reselect the target box on the current frame
					{
						if(fromfile)
						{
							if(play)
								startFrame=flagg;
							else
								startFrame=flagg-1;
							play=false;
							flag--;
							retry=1;
							breaknow=true;
							break;
						}
						else{//if r is pressed in camera mode to reselect the target box, it amounts to reopening camera mode
							cameraAgain=true;
							break;
						}
					}
				case 'x':
					{
						nopoint=!nopoint;
						break;
					}
				case 'd':
					{
						drawDec=!drawDec;
						break;
					}
				case 'p':
					{
						play=!play;
						break;
					}
				case 's':
					{
						//intended to show or hide the console
						//#pragma comment( linker, "/subsystem:\"windows\" /entry:\"mainCRTStartup\"" )
						break;
					}
				case 'c'://switch to camera mode
					{
						cameraAgain=false;
						breaknow=true;
						fromCa=true;//read_options checks this flag: when fromCa is true the file is ignored, as if only "-p ../parameters.yml" were passed
						retry=1;
						box=Rect( 0, 0, 0, 0 );
						flag--;//step the loop back, though this does not quite work
						break;
					}

				}
				if(breaknow) break;
				// if(flagg>=9)
				// fclose(pfilezp);
			} 
			tld.endGpu();
			fprintf(pfilezp,"num=%d %s\n",flag,test2[3]);
			fprintf(pfilezp,"patch_size=%d\n",tld.patch_size);
			fprintf(pfilezp,"USE GPU OR CPU:%s\n",tld.use);
			fprintf(pfilezp,"Initial Bounding Box = x:%d y:%d h:%d w:%d\n",box.x,box.y,box.width,box.height);
			fprintf(pfilezp,"Detection rate: %d/%d\n",detections,frames);
			fprintf(pfilezp,"classifier.pEx: %d classifier.nEx: %d\n", tld.classifier.pEx.size(),tld.classifier.nEx.size());
			fclose(bb_file);
			wholecost=(getTickCount()-wholecost);
			end=time(NULL);
			fprintf(pfilezp,"timecost = %gms \n",wholecost*1000/getTickFrequency());
			fprintf(pfilezp,"every frame cost %g ms \n",wholecost*1000/getTickFrequency()/frames);
			fprintf(pfilezp,"%gms %gms %gms\n\n",costsum[0]/frames,costsum[1]/frames,costsum[2]/frames);
			//prints in order: video name / detection rate / patch width / patch height / total time / filter1 / filter2 / detect / track / learn / filter1 copy time / filter2 copy time
			fprintf(patchf,"%s\t %d/%d\t%d\t%d\t%g\t%g\t%g\t%g\t%g\t%g\t%g\t%g\n",argv[1],detections,frames,tld.patch_size,tld.min_win,wholecost*1000/getTickFrequency()/frames,costsum[0]/frames,costsum[1]/frames,costsum[2]/frames,costsum[3]/frames,costsum[4]/frames,costsum[5]/frames,costsum[6]/frames);
			//time_t start,end;
			//start=time(NULL); ptr=localtime(&start); printf(asctime(ptr));	 
			//fprintf(pfilezp,"timecost2=%f ms\n",difftime(end,start)*1000);
			fclose(pfilezp);	
			fclose(tablef);
			fclose(patchf);
			if(retry==1)
			{
				continue;
			}//do not reset startFrame in that case
			startFrame=0;//reset startFrame
			if (rep){
				rep = false;
				tl = false;
				bb_file = fopen("final_detector.txt","w");
				//capture.set(CV_CAP_PROP_POS_AVI_RATIO,0);
				capture.release();
				capture.open(video);
				goto REPEAT;
			} 
		}
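splitEx, used above to tokenize the option string, is a project helper not shown in the snippet. A sketch with the assumed behavior (split a string on a delimiter and return the pieces):

// Hypothetical splitEx: splits s on delim and returns the tokens in order.
vector<string> splitEx(const string& s, const string& delim){
	vector<string> out;
	size_t start = 0, pos;
	while((pos = s.find(delim, start)) != string::npos){
		out.push_back(s.substr(start, pos - start));
		start = pos + delim.size();
	}
	out.push_back(s.substr(start));
	return out;
}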
Example #3
int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    parser.about("This sample demonstrates the use ot the HoG descriptor.");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    int camera = parser.get<int>("camera");
    string file = parser.get<string>("video");
    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }

    VideoCapture cap;
    if (file.empty())
        cap.open(camera);
    else
        cap.open(file.c_str());
    if (!cap.isOpened())
    {
        cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
        return 2;
    }

    cout << "Press 'q' or <ESC> to quit." << endl;
    cout << "Press <space> to toggle between Default and Daimler detector" << endl;
    Detector detector;
    Mat frame;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
        {
            cout << "Finished reading: empty frame" << endl;
            break;
        }
        int64 t = getTickCount();
        vector<Rect> found = detector.detect(frame);
        t = getTickCount() - t;

        // show the window
        {
            ostringstream buf;
            buf << "Mode: " << detector.modeName() << " ||| "
                << "FPS: " << fixed << setprecision(1) << (getTickFrequency() / (double)t);
            putText(frame, buf.str(), Point(10, 30), FONT_HERSHEY_PLAIN, 2.0, Scalar(0, 0, 255), 2, LINE_AA);
        }
        for (vector<Rect>::iterator i = found.begin(); i != found.end(); ++i)
        {
            Rect &r = *i;
            detector.adjustRect(r);
            rectangle(frame, r.tl(), r.br(), cv::Scalar(0, 255, 0), 2);
        }
        imshow("People detector", frame);

        // interact with user
        const char key = (char)waitKey(30);
        if (key == 27 || key == 'q') // <ESC> or 'q'
        {
            cout << "Exit requested" << endl;
            break;
        }
        else if (key == ' ')
        {
            detector.toggleMode();
        }
    }
    return 0;
}
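Detector is not an OpenCV class; here it is presumably a thin wrapper around HOGDescriptor that switches between the default and the Daimler people detector. A sketch consistent with the calls above (detect, adjustRect, modeName, toggleMode); the detectMultiScale parameters and the shrink factors are assumptions:

// Sketch of a Detector wrapper around HOGDescriptor; details are assumed.
class Detector
{
    enum Mode { Default, Daimler } m;
    HOGDescriptor hog, hog_d;
public:
    Detector() : m(Default), hog(), hog_d(Size(48, 96), Size(16, 16), Size(8, 8), Size(8, 8), 9)
    {
        hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
        hog_d.setSVMDetector(HOGDescriptor::getDaimlerPeopleDetector());
    }
    void toggleMode() { m = (m == Default) ? Daimler : Default; }
    string modeName() const { return (m == Default) ? "Default" : "Daimler"; }
    vector<Rect> detect(const Mat& img)
    {
        vector<Rect> found;
        if (m == Default)
            hog.detectMultiScale(img, found, 0, Size(8, 8), Size(32, 32), 1.05, 2, false);
        else
            hog_d.detectMultiScale(img, found, 0.5, Size(8, 8), Size(32, 32), 1.05, 2, true);
        return found;
    }
    void adjustRect(Rect& r) const
    {
        // HOG tends to return rectangles somewhat larger than the person,
        // so shrink them a little before drawing.
        r.x += cvRound(r.width * 0.1);
        r.width = cvRound(r.width * 0.8);
        r.y += cvRound(r.height * 0.07);
        r.height = cvRound(r.height * 0.8);
    }
};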
Example #4
int main(int argc, char** argv)
{
  VideoCapture cap;
  HandDetection *hd = nullptr;

  std::srand(std::time(0));
  if(!cap.open(0))
    return 0;
  
  HandGesture hg;
  Mat pierre = imread("./pierre.png");
  Mat feuille = imread("./feuille.png");
  Mat ciseaux = imread("./ciseaux.png");

  for(;;) {
      Mat frame;
      cap >> frame;
      if (frame.empty()) break; // stop when the camera yields no frame

      std::stringstream ss;

      if(hd == nullptr) {
	hd = new HandDetection(frame.rows, frame.cols);
	std::cout << "created HandDetection!" << std::endl;
      }

      hg = hd->detect(frame);
      Mat cpuImage = Mat::zeros( feuille.size(), CV_8UC3 );
      Mat playerImage = Mat::zeros( feuille.size(), CV_8UC3 );

      std::string player, cpu;
      switch(hg) {
      case PIERRE : player = "player = pierre"; playerImage = pierre; cpu = "cpu = feuille"; cpuImage = feuille; break;
      case FEUILLE : player = "player = feuille"; playerImage = feuille; cpu = "cpu = ciseaux"; cpuImage = ciseaux; break;
      case CISEAUX : player = "player = ciseaux"; playerImage = ciseaux; cpu = "cpu = pierre"; cpuImage = pierre; break;
      case ERROR : player = "player = nothing detected"; break;
      }
      putText(frame, cpu, Point(5, frame.rows * 0.98), FONT_HERSHEY_PLAIN, 1,  Scalar(0,0,255,255));
      putText(frame, player, Point(frame.cols - player.size() * 9, frame.rows * 0.98), FONT_HERSHEY_PLAIN, 1,  Scalar(0,0,255,255));

      for (int i = 0; i < feuille.rows; i++) {
	for (int j = 0; j < feuille.cols; j++) {
	  frame.at<Vec3b>(i, j) = cpuImage.at<Vec3b>(i, j);
	}
      }

      for (int i = 0; i < feuille.rows; i++) {
	for (int j = 0; j < feuille.cols; j++) {
	  frame.at<Vec3b>(i, j + frame.cols - playerImage.cols) = playerImage.at<Vec3b>(i, j);
	}
      }

      if(hd->handForUi.rows > 0) {
	cv::resize(hd->handForUi, hd->handForUi, feuille.size());

	for (int i = 0; i < feuille.rows; i++) {
	  for (int j = 0; j < feuille.cols; j++) {
	    Vec3b v = frame.at<Vec3b>(i + frame.rows * 0.3, j + frame.cols - playerImage.cols);
	    v[0] = hd->handForUi.at<uchar>(i, j);
	    v[1] = hd->handForUi.at<uchar>(i, j);
	    v[2] = hd->handForUi.at<uchar>(i, j);
	    frame.at<Vec3b>(i + frame.rows * 0.3, j + frame.cols - playerImage.cols) = v;
	  }
	}
      }


      putText(frame, "CPU", Point(feuille.rows * 0.3, feuille.cols * 1.1), FONT_HERSHEY_PLAIN, 2,  Scalar(0,0,255,255));
      putText(frame, "PLAYER", Point(frame.rows * 1.07, feuille.cols * 1.1), FONT_HERSHEY_PLAIN, 2,  Scalar(0,0,255,255));
    
      imshow("Game Frame", frame);
      	  
      if( waitKey(1) == 27 ) break;
    }
  return 0;
}
Example #5
int main(int argc, const char * argv[])
{
    
    ft_data ftdata;
    if (argc<3) {
        cout<<argv[0]<<" user_profile_dir camera_profile.yaml";
        return 0;
    }

    fs::path baseDirPath(argv[1]);
    ASM_Gaze_Tracker poseTracker(baseDirPath / "trackermodel.yaml", fs::path(argv[2]));
    
    
    vector<Point3f> faceCrdRefVecs;
    faceCrdRefVecs.push_back(Point3f(0,0,0));
    faceCrdRefVecs.push_back(Point3f(50,0,0));
    faceCrdRefVecs.push_back(Point3f(0,50,0));
    faceCrdRefVecs.push_back(Point3f(0,0,50));
    
    VideoCapture cam;
    cam.open(0);
    if(!cam.isOpened()){
        return 0;
    }
    Mat rvec, tvec;
    Mat im;
    captureImage(cam,im);
    

    while(true){
        bool success = captureImage(cam, im, true);
        if (success == false) {
            break;
        }
        
        bool succeeded = poseTracker.featureTracking(im);
        if (succeeded)
            poseTracker.estimateFacePose();

        
        
        
        Mat frontim,flipback;
        flip(im,flipback,1);
        
        vector<Point2f> reprjCrdRefPts;
        vector<Point2f> reprjFeaturePts;
        poseTracker.projectPoints(poseTracker.facialPointsIn3D, reprjFeaturePts);
        poseTracker.projectPoints(faceCrdRefVecs, reprjCrdRefPts);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[1], Scalar(255,0,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[2], Scalar(0,255,0),2);
        line(im, reprjCrdRefPts[0], reprjCrdRefPts[3], Scalar(0,0,255),2);
        drawPoints(im, reprjFeaturePts);
        drawStringAtTopLeftCorner(im, "distance to camera:" + boost::lexical_cast<string>(poseTracker.distanceToCamera()));
        imshow("head pose",im);
        
        vector<Point2f> transformedPoints = poseTracker.tracker.points;
        fliplr(transformedPoints, im.size());
        Mat part;
        
        Mat hM = findHomography(poseTracker.facialPointsIn2D ,transformedPoints, 0);
        warpPerspective(flipback(boundingRect(transformedPoints)), frontim, hM, im.size());
        imshow("front", im);

        
        int c = waitKey(1)%256;
        if(c == 'q')break;
        
    }
    
}
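captureImage, fliplr, and drawStringAtTopLeftCorner are helpers from the tracker project, not OpenCV. A sketch of captureImage under the assumed semantics (read one frame, optionally mirror it, report success):

// Assumed captureImage helper: grab a frame and optionally mirror it.
bool captureImage(VideoCapture& cam, Mat& im, bool flipped = false)
{
    if (!cam.read(im) || im.empty())
        return false;
    if (flipped)
        flip(im, im, 1); // horizontal mirror
    return true;
}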
Example #6
int main(){

    // Open connection to Kinect
    cout << "Connecting to Kinect" << endl;
    
    VideoCapture kinect;
    kinect.open(CV_CAP_OPENNI);

    if ( !kinect.isOpened() ){
        cout << "Can't connect to Kinect" << endl;
        return 1;
    }
    
    
    // Registration - Loads calibration data to align depth map with visual camera
    if( kinect.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0 ) 
        kinect.set( CV_CAP_PROP_OPENNI_REGISTRATION, 1 );

    // Initialize variables
    
    // Pixel Frames
    Mat depth_map;
    Mat depth_img;
    Mat point_cld;
    Mat point_img;
    Mat rgb_img;
    Mat hsv_img;
    Mat thres_img;
    Mat thres2_img;
    Mat prev_thres_img;
    Mat valid_mask;
    
    // Stored Positions
    vector<Vec3f> circles, circles2, circles3;
    Mat pos3;
    Mat vel3;
    vector<Point> mes2;
    vector<Point> pos2;
    vector<Point> vel2;
        
    // Get Properties
    int width  = kinect.get( CV_CAP_PROP_FRAME_WIDTH );
    int height = kinect.get( CV_CAP_PROP_FRAME_HEIGHT);
    int fps = kinect.get( CV_CAP_PROP_FPS );
    cout << "Resolution    " << width << "x" << height << "    FSP    " << fps << endl;
    
    // Visual Ball Tracking        
    Point center;
    Point predicted;
    Point corrected;
    Point direction;
    
    int radius;
    string color;
    
    // Give ball color thresholds to detect
    /* // Pink 
    color = "Pink";
    Scalar hsv_min  = Scalar(0, 50, 170, 0);
    Scalar hsv_max  = Scalar(10, 180, 256, 0);
    Scalar hsv_min2 = Scalar(170, 50, 170, 0);
    Scalar hsv_max2 = Scalar(256, 180, 256, 0);
    /*/ 
    //* // Green
    color = "Green";
    Scalar hsv_min  = Scalar(40, 50, 70, 0);
    Scalar hsv_max  = Scalar(80, 256, 256, 0);
    /*/ // Yellow
    color = "Yellow";
    Scalar hsv_min  = Scalar(20, 30, 50, 0);
    Scalar hsv_max  = Scalar(60, 256, 256, 0);
    /* 
    color = "Purple";
    Scalar hsv_min  = Scalar(115, 30, 50, 0);
    Scalar hsv_max  = Scalar(160, 256, 256, 0);
    //*/
    Scalar hsv_min2 = hsv_min;
    Scalar hsv_max2 = hsv_max;
    //*/
    cout << "Detecting " << color << " Ball" << endl;
    
    prev_thres_img = Mat::zeros(height, width, CV_8UC1);
    int frames_between_valid = 1;
    pos3.push_back(Vec3f(0,0,0));
    vel3.push_back(Vec3f(0,0,0));
        
    // Filter Parameters
    int erodeAmt = 1;
    int dilateAmt = 1;
    double alpha = 0.9; // weight for the moving average; as an int this would truncate to 0
    
    // Kalman Filter Computer Vision Tracking
    /*
    KalmanFilter KF3(6, 3, 0);
    KF3.transitionMatrix = *(Mat_<float>(6,6) << 1,0,0,1,0,0,
                                                0,1,0,0,1,0,
                                                0,0,1,0,0,1,
                                                0,0,0,1,0,0,
                                                0,0,0,0,1,0,
                                                0,0,0,0,0,1 );

	Mat processNoise(6, 1, CV_32F);
	Mat measurement = Mat::zeros(3, 1, CV_32F); */
	    
    KalmanFilter KF(4, 2, 0);
    Mat processNoise(4, 1, CV_32F);
    Mat_<float> measurement = Mat::zeros(2, 1, CV_32F); 

	KF.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,0,0,   0,1,0,0,  0,0,1,0,  0,0,0,1);
    // Too slow, change variables
    setIdentity(KF.measurementMatrix);
    setIdentity(KF.processNoiseCov, Scalar::all(1e-1));
    setIdentity(KF.measurementNoiseCov, Scalar::all(1e-2));
    setIdentity(KF.errorCovPost, Scalar::all(.1));


    for (;;){
        
        if ( !kinect.grab() ) {
            cout << "Cannot grab images. Check Kinect Connection." << endl;
            return -1;
        }
        
        if (!( kinect.retrieve( depth_map, CV_CAP_OPENNI_DEPTH_MAP ) &&
            kinect.retrieve( point_cld, CV_CAP_OPENNI_POINT_CLOUD_MAP ) &&
            kinect.retrieve( rgb_img, CV_CAP_OPENNI_BGR_IMAGE ) &&
            kinect.retrieve( valid_mask, CV_CAP_OPENNI_VALID_DEPTH_MASK ))) {
            cout << "Could not retrieve image. Check Kinect." << endl;
            return -1;
        }

        // Scale depth map to show as image
        //depth_map.convertTo( depth_img, CV_8UC1, 255.0/8192.0 ); //scale 16 bit to 8 bit

        // Pixel Filtering of depth map / point cloud
        // Get rid of holes in depth map - necessary? median filter instead?
        point_img = point_cld.clone();
        dilate(point_img,point_img,Mat(),Point(-1,-1),dilateAmt);
        medianBlur(point_img, point_img, 3);
        erode(point_img,point_img,Mat(),Point(-1,-1),erodeAmt);
        
        // Weighted Moving Average
        // Smooth depth map/ remove jitter and noise
        // Joint Bilateral Filter - too slow? unnecessary?
        
        // Convert RGB to HSV Color Space
        cvtColor( rgb_img, hsv_img, CV_BGR2HSV );
        
        // Thresholding
        // Handle color wrap around by combining two halves (for red)
        inRange( hsv_img, hsv_min, hsv_max, thres_img );
        inRange( hsv_img, hsv_min2, hsv_max2, thres2_img );
        thres_img = thres_img | thres2_img;
        
        //namedWindow( "Thresholded Pre Image", CV_WINDOW_AUTOSIZE );
        //imshow( "Thresholded Pre Image", thres_img );

        // Get rid of more noise via Erode/Dialate
		erode(thres_img,thres_img,Mat(),Point(-1,-1),erodeAmt);
		dilate(thres_img,thres_img,Mat(),Point(-1,-1),dilateAmt);
		
		// Fill Holes - remove - induces white flickering
		//thres2_img = Scalar::all(255) - thres_img;
		//floodFill(thres2_img, Point(0,0), Scalar(0));
		//thres_img += thres2_img;
		
        // Hough Transform to detect circles
        // Works better with smoothing first
        GaussianBlur( thres_img, thres_img, Size(9, 9), 2, 2 );

        // weight then blur or blur then weight?
        // Weighted Average to Reduce Flickering
        addWeighted( thres_img, alpha, prev_thres_img, 1.0-alpha, 0.0, thres_img );
        prev_thres_img = thres_img;
        
        // TODO Use ROI for specified search space
        // res, min_dist, canny_upper, center_detection, min_r, max_r
        HoughCircles(thres_img, circles, CV_HOUGH_GRADIENT, 2, height, 400, 40, 5, 21);
        
        // Draw Trajectory
        for ( int i = 0; i < ((int) mes2.size())-1; ++i ) { 
            line( rgb_img, mes2[i], mes2[i+1], BLUE, 1 );
            line( rgb_img, pos2[i], pos2[i+1], RED, 1 );
        }
        
        // Draw Circles
        //TODO change to multiple circles and select one closest to last
        // Should only have at most one circle
        for (vector<Vec3f>::iterator it = circles.begin(); it != circles.end(); ++it ){ 
            printf("Ball! x=%f y=%f r=%f\n\r",(*it)[0],(*it)[1],(*it)[2] );
            center = Point(round((*it)[0]), round((*it)[1]));
            radius = round((*it)[2]);
            
            cout<<depth_map.at<unsigned short>(int(center.y), int(center.x))<<endl;
        	
            // Kalman Filter on 2D point from Computer Vision Tracking
            // Get rid of large jumps in tracking
            Mat prediction = KF.predict();
            predicted = Point(prediction.at<float>(0), prediction.at<float>(1));
            measurement.at<float>(0) = center.x;
            measurement.at<float>(1) = center.y;
            Mat correction = KF.correct(measurement);
            corrected = Point(correction.at<float>(0), correction.at<float>(1));

            if (pos2.empty()) {
                direction = Point(0,0);
            } else {
                direction = corrected - pos2.back();
            }
            vel2.push_back(direction);
            mes2.push_back(center);
            pos2.push_back(corrected);

            // Retrieve 3D position data from depth map/ point cloud
            // Calculate 3D velocity vector from two frames
            // TODO put velocity vector through Kalman Filter for stability or return a weighted average
            // Take median of all within range?
            //Mat roi = point_cld( Rect(center.x-radius, center.y-radius, 2*radius, 2*radius));
            //medianBlur(roi, roi, Size(3,3));
            Vec3f position = point_img.at<Vec3f>(int(center.y), int(center.x));
            Vec3f velocity = Vec3f(0,0,0);
            if (position != Vec3f(0,0,0)) { // Valid Point
                velocity = (position - pos3.at<Vec3f>(pos3.total()-1)) * ((double)fps / frames_between_valid); // displacement over elapsed time, with elapsed time = frames / fps
                vel3.push_back(velocity);
                pos3.push_back(position);
                frames_between_valid = 1;
            } else {
                frames_between_valid++;
            }
            if (frames_between_valid > 1000) {
                cout << "No depth data for ball. Please move backwards." << endl;
            }
            
            cout << position << endl;
            cout << velocity << endl;
            
            // Draw tracked circles
            circle( rgb_img, center, 3, BLUE, -1, 8, 0 );
            circle( rgb_img, center, radius, BLUE, 3, 8, 0 );
            circle( point_img, center, 3, BLUE, -1, 8, 0 );
            circle( point_img, center, radius, BLUE, 3, 8, 0 );
            circle( point_img, corrected, 3, RED, -1, 8, 0 );
            circle( point_img, corrected, radius, RED, 3, 8, 0 );

            // Draw predicted, observed, corrected, and velocity vectors
            drawArrow( rgb_img, corrected, corrected+(direction*2), RED, 2, CV_AA, 0 );
            //circle( rgb_img, center, 3, BLUE, -1, 8, 0 );
            circle( rgb_img, predicted, 3, GREEN, -1, 8, 0 );
            //circle( rgb_img, corrected, 3, RED, -1, 8, 0 );
        }

        // Plot 3d Trajectory
        Mat pos3_channels[3];
        Mat vel3_channels[3];
        split(pos3, pos3_channels);
        split(vel3, vel3_channels);

        Mat graph = Mat::zeros((plot_height+20)*2, plot_width*tick_size, CV_8UC3);
        // Title
        putText(graph, "Position (m)" , Point(20,20), FONT_HERSHEY_SIMPLEX, 0.5, RED, 1, CV_AA);
        putText(graph, "Velocity (cm/s)" , Point(20,round(graph.rows/2)), FONT_HERSHEY_SIMPLEX, 0.5, RED, 1, CV_AA);
        // Legend
        putText(graph, "x" , Point(10,2*plot_height), FONT_HERSHEY_SIMPLEX, 0.25, RED, 1, CV_AA);
        putText(graph, "y" , Point(10,2*plot_height+10), FONT_HERSHEY_SIMPLEX, 0.25, BLUE, 1, CV_AA);
        putText(graph, "z" , Point(10,2*plot_height+20), FONT_HERSHEY_SIMPLEX, 0.25, GREEN, 1, CV_AA);
        Point a, b;
        int start = (int)pos3.total() > plot_width ? (int)pos3.total() - plot_width : 0;
        for ( int t = start; t < ((int)pos3.total())-1; ++t ){
            // Position
            a = Point(tick_size*(t-start), round(pos3_channels[0].at<float>(t)*plot_height/2.0+margin));
            b = Point(tick_size*(t-start+1), round(pos3_channels[0].at<float>(t+1)*plot_height/2.0+margin));
            line( graph, a, b, RED, 1 ); //x?
            a = Point(tick_size*(t-start), round(pos3_channels[1].at<float>(t)*plot_height/2.0+margin));
            b = Point(tick_size*(t-start+1), round(pos3_channels[1].at<float>(t+1)*plot_height/2.0+margin));
            line( graph, a, b, GREEN, 1 ); //z
            a = Point(tick_size*(t-start), round(pos3_channels[2].at<float>(t)*plot_height/2.0+margin));
            b = Point(tick_size*(t-start+1), round(pos3_channels[2].at<float>(t+1)*plot_height/2.0+margin));
            line( graph, a, b, BLUE, 1 ); //y
            
            // Velocity
            a = Point(tick_size*(t-start), round(vel3_channels[0].at<float>(t)*50*plot_height+margin2));
            b = Point(tick_size*(t-start+1), round(vel3_channels[0].at<float>(t+1)*50*plot_height+margin2));
            line( graph, a, b, RED, 1 ); //x
            a = Point(tick_size*(t-start), round(vel3_channels[1].at<float>(t)*50*plot_height+margin2));
            b = Point(tick_size*(t-start+1), round(vel3_channels[1].at<float>(t+1)*50*plot_height+margin2));
            line( graph, a, b, GREEN, 1 ); //z
            a = Point(tick_size*(t-start), round(vel3_channels[2].at<float>(t)*50*plot_height+margin2));
            b = Point(tick_size*(t-start+1), round(vel3_channels[2].at<float>(t+1)*50*plot_height+margin2));
            line( graph, a, b, BLUE, 1 ); //y
        }
       
        namedWindow( "Graph", CV_WINDOW_AUTOSIZE );
        imshow( "Graph", graph );
        
        // Display Image
        namedWindow( "Original Image", CV_WINDOW_AUTOSIZE );
        imshow( "Original Image", rgb_img );
        //namedWindow( "HSV Image", CV_WINDOW_AUTOSIZE );
        //imshow( "HSV Image", hsv_img );
        namedWindow( "Thresholded Image", CV_WINDOW_AUTOSIZE );
        imshow( "Thresholded Image", thres_img );
        //namedWindow( "Depth Map", CV_WINDOW_AUTOSIZE ); 
        //imshow( "Depth Map", point_img );
        namedWindow( "Point Cloud", CV_WINDOW_AUTOSIZE ); 
        imshow( "Point Cloud", point_img);
        
        char key = waitKey(30);
        // hit ESC to exit
        if ( key == 27 ) break;
        // hit s to save
        else if( key == 's' ) {
           imwrite("_rgb.png", rgb_img);
           imwrite("_thres.png", thres_img);
           imwrite("_depth.png", depth_img);
           imwrite("_point.png", point_img);
           imwrite("_graph.png", graph);
        } 
        // hit c to clear
        else if( key == 'c' ) {
            mes2.clear();
            pos2.clear();
            vel2.clear();
            pos3 = Mat::zeros(1, 1, CV_32FC3);
            vel3 = Mat::zeros(1, 1, CV_32FC3);
        }
    }
    cout << "Can't grab images from Kinect" << endl;

}
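drawArrow and the drawing constants (BLUE, GREEN, RED, plot_height, plot_width, tick_size, margin, margin2) are globals defined outside this snippet. A sketch of plausible definitions, assuming BGR Scalar colors and a simple two-segment arrowhead (newer OpenCV versions also provide arrowedLine for this):

// Assumed globals used by the tracking code above; values are illustrative.
const Scalar BLUE(255, 0, 0), GREEN(0, 255, 0), RED(0, 0, 255);
const int plot_height = 100, plot_width = 300, tick_size = 2;
const int margin = 120, margin2 = 60;

// Minimal arrow helper matching the drawArrow(...) call signature above.
void drawArrow( Mat& img, Point p, Point q, const Scalar& color,
                int thickness, int line_type, int shift ){
    line( img, p, q, color, thickness, line_type, shift );
    double angle = atan2( (double)(p.y - q.y), (double)(p.x - q.x) );
    const double len = 9.0; // arrowhead size in pixels
    Point tip1( q.x + (int)(len * cos(angle + CV_PI / 4)),
                q.y + (int)(len * sin(angle + CV_PI / 4)) );
    Point tip2( q.x + (int)(len * cos(angle - CV_PI / 4)),
                q.y + (int)(len * sin(angle - CV_PI / 4)) );
    line( img, tip1, q, color, thickness, line_type, shift );
    line( img, tip2, q, color, thickness, line_type, shift );
}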
Example #7
int main(int argc, char** argv)
{
	if (argc != 3) {
		help(argv);
		return 1;
	}

	// Verify the input values
	//VideoCapture cap(argv[1]); // open the passed video

	VideoCapture cap;

	// Futile attempt to try different codecs
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'I', 'V', '4'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('D', 'A', 'V', 'C'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', '2'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('3', 'I', 'V', 'X'));
	//cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('A', 'V', 'C', '1'));
	cap.set(CV_CAP_PROP_FOURCC, CV_FOURCC('H', '2', '6', '4'));
	cap.open(argv[1]);
	
	if (!cap.isOpened()) {		// check if we succeeded
		cout << "\nCan not open video file '" << argv[1] << "'" << endl;
		return -1;
	} else {
		cout << "Video " << argv[1] << endl;
		cout << " width  =" << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
		cout << " height =" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
		cout <<	" nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
		cout << " fps    =" << cap.get(CV_CAP_PROP_FPS) << endl;
	}

	// Load the trail of locations
	location_train locations;

	if (locations.load(argv[2]) != location_train::error_code::no_error) {
		cout << "Cannot load the location file '" << argv[2] << "'" << endl;
		return -1;
	}

	// do the simple sanity check
	if (locations.getCount() != cap.get(CV_CAP_PROP_FRAME_COUNT)) {
		cout << "Data points don't match." << endl;
		cout << " n frames   =" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
		cout << " n locations=" << locations.getCount() << endl;
		return -1;
	}

	location_train::point_t ul{ 0,0 };
	location_train::point_t lr{ (unsigned long)cap.get(CV_CAP_PROP_FRAME_WIDTH),(unsigned long)cap.get(CV_CAP_PROP_FRAME_HEIGHT) };

	if (locations.verify(ul, lr) != location_train::error_code::no_error) {
		cout << "Data points don't fit into video space." << endl;
		return -1;
	}

	// Set up the detector with default parameters.
	SimpleBlobDetector detector;

	auto loc_index = 0;
	auto fps = cap.get(CV_CAP_PROP_FPS);

	// Process frame by frame
	for (;;)
	{
		Mat frame;
		cap >> frame; // get a new frame from the file
		if (frame.empty())
			break; // end of the video
		double frame_time = loc_index / fps;

		// Detect blobs.
		std::vector<KeyPoint> keypoints;
		detector.detect(frame, keypoints);
		
		// No need to check the range since we already verified that the number of locations
		// is the same as the number of frames
		auto location = locations[loc_index];
		loc_index++;

		if (keypoints.size() == 0) {
			cout << "Error: No objects found at time: " << frame_time << endl;
		}
		bool located = false;
		for ( auto key : keypoints ) {
			// The found blob should be at least 3x3
			if (key.size > 3) {
				if (inPoint(key.pt, key.size, location)) {
					located = true;
					break;
				}
			}
		}
		if (!located) {
			cout << "Error: No objects at time: " << frame_time << "located at expected position" << endl;
		}
	}

	// the video file will be deinitialized automatically in VideoCapture destructor
	return 0;
}
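Note that default-constructing SimpleBlobDetector as above only compiles against the OpenCV 2.4 API; from OpenCV 3 on the detector is created through a factory method. A sketch of the equivalent setup for the newer API (the parameter values are assumptions):

// OpenCV 3+ style creation; minArea of 9 mirrors the "at least 3x3" check above.
SimpleBlobDetector::Params params;
params.filterByArea = true;
params.minArea = 9;
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
std::vector<KeyPoint> keypoints;
detector->detect(frame, keypoints);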
/*decision navTree(blob blobPosition, blob blobPosition2, Mat finalImage, bool findBlob1, bool findBlob2) {
    decision navDecision;
    if (findBlob1) {                            //if first sample has not been retrieved
        if (blobPosition.maxArea<MAX_AREA) {    //if first sample is not close enough to be retrieved
            navDecision.lookBlob1=true;
            if (blobPosition2.blobDetect && findBlob2) {     //if the second sample is also on screen and we are looking for it
                if (blobPosition2.maxArea<MAX_AREA) {        //if the second sample is not close enough to be retrieved
                    navDecision.lookBlob2=true;
                    if (blobPosition.maxArea>=blobPosition2.maxArea) {  //Navigate towards largest/closest sample
                        navDecision.lspeed=blobPosition.lturn*MAX_SPEED;
                        navDecision.rspeed=blobPosition.rturn*MAX_SPEED;
                        cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
                    } else {
                        navDecision.lspeed=blobPosition2.lturn*MAX_SPEED;
                        navDecision.rspeed=blobPosition2.rturn*MAX_SPEED;
                        cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,0));
                    }
                } else {
                    cout<<"Blob 2 was found\n";
                    navDecision.lookBlob2=false;
                }
            } else {                            //if the second sample isn't on screen, then navigate towards first sample
                if (findBlob2) {
                    navDecision.lookBlob2=true;
                } else {
                    navDecision.lookBlob2=false;
                }
                navDecision.lspeed=blobPosition.lturn*MAX_SPEED;
                navDecision.rspeed=blobPosition.rturn*MAX_SPEED;
                cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
            }
        } else {                                //if first sample is close enough to be retrieved
            cout<<"Blob 1 was found\n";         //then stop looking for first sample
            navDecision.lookBlob1=false;
        }
    } else if (findBlob2) {                     //if we found the first sample. but are still looking for the second sample
        if (blobPosition2.maxArea<MAX_AREA) {
            if (blobPosition.blobDetect&&findBlob1) {
                if (blobPosition2.maxArea>blobPosition2.maxArea) {
                    lspeed=blobPosition2.lturn*MAX_SPEED;
                    rspeed=blobPosition2.rturn*MAX_SPEED;
                    cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
                } else {
                    lspeed=blobPosition.lturn*MAX_SPEED;
                    rspeed=blobPosition.rturn*MAX_SPEED;
                    cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,0));
                }
            } else {
                lspeed=blobPosition2.lturn*MAX_SPEED;
                rspeed=blobPosition2.rturn*MAX_SPEED;
                cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
            }
        } else {
            cout<<"Blob 2 was found\n";
            findBlob2=false;
        }
    }
}*/
int main( int argc, char** argv )
{
    initRoboteq();//Refer to nasaroboteq.c
    VideoCapture cap;
    blob blobPosition, blobPosition2;
    Mat frame, temp, temp2;
    Mat bgrImage, hsvImage, hsvOutputImage, hsvOutputImage2, finalImage;
    static int lspeed, rspeed;
    int x,y;
    int mArea;
    int width;
    bool useMorphOps=true;
    bool findBlob1 = true;
    bool findBlob2 = true;

    cap.open(0); //0:default web cam   1:external web cam
    trackbarInit();

    int fin=1;
    while(fin>=1)
    {
        cap>>frame;
        frame.copyTo(bgrImage);
        width=frame.cols;
        cvtColor(bgrImage, hsvImage, COLOR_BGR2HSV);
        inRange(hsvImage,Scalar(h_low,s_low,v_low), Scalar(h_high,s_high,v_high), hsvOutputImage);
        inRange(hsvImage,Scalar(h_low2,s_low2,v_low2), Scalar(h_high2,s_high2,v_high2), hsvOutputImage2);
        //imshow("Before Morph", hsvOutputImage2);
        if(useMorphOps) {
                morphOps(hsvOutputImage);
                morphOps(hsvOutputImage2);
        }
        finalImage=hsvOutputImage2|hsvOutputImage;
        hsvOutputImage.copyTo(temp);
        hsvOutputImage2.copyTo(temp2);
        blobPosition=blobCenter(temp, width);
        blobPosition2=blobCenter(temp2, width);



        if (findBlob1||findBlob2) {                     //if all samples have not been retrieved
            if (findBlob1) {                            //if first sample has not been retrieved
                if (blobPosition.maxArea<MAX_AREA) {    //if first sample is not close enough to be retrieved
                    if (blobPosition2.blobDetect && findBlob2) {     //if the second sample is also on screen and we are looking for it
                        if (blobPosition2.maxArea<MAX_AREA) {        //check to see if second sample is close enough to be retrieved
                            if (blobPosition.maxArea>=blobPosition2.maxArea) {  //Navigate towards largest/closest sample
                                lspeed=blobPosition.lturn*MAX_SPEED;
                                rspeed=blobPosition.rturn*MAX_SPEED;
                                cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,255));
                            } else {
                                lspeed=blobPosition2.lturn*MAX_SPEED;
                                rspeed=blobPosition2.rturn*MAX_SPEED;
                                cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
                            }
                        } else {
                            cout<<"Blob 2 was found\n";
                            findBlob2=false;
                        }
                    } else {                            //if the second sample isn't on screen or we aren't looking for it, then navigate towards first sample
                        lspeed=blobPosition.lturn*MAX_SPEED;
                        rspeed=blobPosition.rturn*MAX_SPEED;
                        cv::circle(finalImage,cv::Point(blobPosition.xPos,blobPosition.yPos),10,cv::Scalar(0,0,255));
                    }
                } else {                                //if first sample is close enough to be retrieved
                    cout<<"Blob 1 was found\n";         //then stop looking for first sample
                    findBlob1=false;
                }
            } else if (findBlob2) {                         //if the first sample was retrieved but the second one hasn't
                if (blobPosition2.maxArea<MAX_AREA) {
                    lspeed=blobPosition2.lturn*MAX_SPEED;
                    rspeed=blobPosition2.rturn*MAX_SPEED;
                    cv::circle(finalImage,cv::Point(blobPosition2.xPos,blobPosition2.yPos),10,cv::Scalar(0,0,255));
                } else {
                    cout<<"Blob 2 was found\n";
                    findBlob2=false;
                }
            }
        } else {                                        //If all the samples have been retrieved
            lspeed=rspeed=0;                            //Then end the program (in future, make robot go home)
            fin=0;
            cout<<"Robot has reached home base\n";
        }



        cout<<"Left Speed: "<<lspeed<<" Right Speed: "<<rspeed<<"\n";
        //sendspeed(lspeed, rspeed);
        //imshow("Orange", hsvOutputImage);
        //imshow("Blue", hsvOutputImage2);
        imshow("Combined Image", finalImage);
        //bw2color(bgrImage, finalImage);
        if( waitKey(1) == 27 ) break; // stop capturing by pressing ESC
    }
    return 0;
}
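trackbarInit, blobCenter, morphOps, and the HSV bound globals (h_low, s_low, and so on) come from the rest of this project. A sketch of a typical morphOps under the assumed behavior (erode to remove speckle noise, then dilate to restore blob size):

// Assumed morphOps helper for cleaning up the thresholded masks.
void morphOps(Mat &thresh)
{
    Mat erodeElement  = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);
    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}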
Example #9
int main(int argc, char *argv[])
{
	ros::init(argc, argv, "verify_tracking_node");
	ros::NodeHandle n;
	std::string port;
	ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
	int baud;
	ros::param::param<int>("~baud", baud, 57600);
	ros::Rate loop_rate(10);

	ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
	ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
	ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);

	const int person_number = 3;
	cv_result_t cv_result = CV_OK;
	cv_handle_t handle_detect = NULL;
	cv_handle_t handle_track = NULL;
	cv_handle_t handle_verify = NULL;
	cv_feature_t *p_feature_new_1[person_number];

	int main_return = -1;
	int verify_flag = 0;
	int face_detect_flag = 0;
	VideoCapture capture;
	capture.open(0);         // open the camera
	if (!capture.isOpened()) {
		fprintf(stderr, "Liveness can not open camera!\n");
		return -1;
	}
	int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
	int frame_half_width = frame_width >> 1;
	int frame_half_height = frame_height >> 1;
	//printf("width %d height %d \n", frame_width, frame_height);
	Point expect(frame_half_width , frame_half_height);
	handle_verify = cv_verify_create_handle("data/verify.tar");
	if(!handle_verify){
		fprintf(stderr, "failed to init verify handle \n");
		goto RETURN;
	}
	handle_track = cv_face_create_tracker(NULL, CV_FACE_SKIP_BELOW_THRESHOLD);
	if (!handle_track) {
		fprintf(stderr, "fail to init track handle\n");
		goto RETURN;
	}
	handle_detect = cv_face_create_detector(NULL, CV_FACE_SKIP_BELOW_THRESHOLD | CV_DETECT_ENABLE_ALIGN);
	if (!handle_detect) {
		fprintf(stderr, "fail to init detect handle\n");
		//goto RETURN;
		return -1;
	}
	create_verify_feature_db(handle_detect, handle_verify, person_number, p_feature_new_1, frame_width, frame_height);
	while(1)
	{
		verify_flag = verify_from_camera(handle_detect, handle_verify, capture, p_feature_new_1, person_number, frame_width, frame_height);
		if(verify_flag != 1)
			continue;
		face_track(handle_track, capture, expect, frame_width, frame_height, servox_pub, servoy_pub, motor_pub);
	}
	for(int i = 1; i < person_number; i++)
	{
		cv_verify_release_feature(p_feature_new_1[i]);
	}
	// destroy verify handle
RETURN:
	// release the memory of face
	cv_verify_destroy_handle(handle_verify);
	// destroy detect handle
	cv_face_destroy_detector(handle_detect);

	fprintf(stderr, "test finish!\n");
}
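The servo and motor publishers are consumed inside face_track, which is not shown. A sketch of how one tracking step might publish a command through them, assuming a single-character command protocol:

// Hypothetical command publication, e.g. from inside face_track;
// the 'L' command and its meaning are assumptions.
std_msgs::Char cmd;
cmd.data = 'L';          // assumed: pan the x-axis servo left
servox_pub.publish(cmd);
ros::spinOnce();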
Example #10
int mainPop()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mogcanny,mask;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog,mogc;
	cap>>frame;
	mask=Mat::zeros(frame.size(),CV_8UC1);
	vector<vector<Point> > maskc;
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than a median blur but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		mogc.set("backgroundRatio",0.01);
		//mogc.set("nmixtures",1);
		mogc(canny,mogcanny,0.01);
		//bitwise_not(mogcanny,mogcanny,mogcanny);
		int minsize = 5;
		
		/*if(!maskc.empty())
		{
			for( int i = 0; i< maskc.size(); i++ )
			{
				Scalar color = Scalar( 255,255,255 );
				Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
				drawContours( aux, maskc, i, color, 3, 8, noArray(), 0, Point());
				int nzt = countNonZero(aux);
				bitwise_and(mogcanny,aux,aux);
				int nz=countNonZero(aux);
				double per = nz/double(nzt);
				if(per<0.05)
				{
					maskc.erase(maskc.begin()+i);
				}
			}
		}*/
		//maskc.clear();
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		mask = Mat::zeros( foreground.size(), CV_8UC1 );
		/*findContours(mask,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		mask = Mat::zeros( foreground.size(), CV_8UC1 );
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
			drawContours( aux, contours, i, color, 3, 8, hierarchy, 0, Point());
			int nzt = countNonZero(aux);
			bitwise_and(mogcanny,aux,aux);
			int nz=countNonZero(aux);
			double per = nz/double(nzt);
			if(per>0.05)
			{
				drawContours( mask, contours, i, (255,255,255), -1, 8, hierarchy, 0, Point());
			}
		}*/
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			Mat aux = Mat::zeros(foreground.size(),CV_8UC1);
			if(contours[i].size()>minsize)
			{
				drawContours( aux, contours, i, color, 3, 8, hierarchy, 0, Point());
				int nzt = countNonZero(aux);
				bitwise_and(mogcanny,aux,aux);
				int nz=countNonZero(aux);
				double per = nz/double(nzt);
				if(per>0.01)
				{
					drawContours( mask, contours, i, color, 3, 8, hierarchy, 0, Point());
					//maskc.push_back(contours[i]);
				}
			}
		}
		/*for( int i = 0; i< maskc.size(); i++ )
		{
			drawContours( mask, maskc, i, (255,255,255), -1, 8, noArray(), 0, Point());
		}*/
		
        imshow( "Capture ",frame );
        //image.copyTo(foreground,foreground); //colors
		imshow("Foreground ",mask);
		imshow("Canny ",canny);
		imshow("Edges ",mogcanny);
		char c = (char)waitKey(1); //faster
        if( c == 27 )   
            break;

    }
    return 0;
}
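calpha, cbeta, the slider variables, and the trackbar callbacks are globals shared by this example and the mainD2/mainD3 variants below. A sketch of plausible definitions matching the createTrackbar calls (the maximum values are assumptions):

// Assumed globals and callbacks for the Canny threshold trackbars.
const int calpha_slider_max = 255;
const int cbeta_slider_max = 255;
int calpha, calpha_slider;
int cbeta, cbeta_slider;

void on_calpha_trackbar(int pos, void*){ calpha = pos; }
void on_cbeta_trackbar(int pos, void*){ cbeta = pos; }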
Example #11
int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h help     | false           | print help message }"
        "{ l left     |                 | specify left image }"
        "{ r right    |                 | specify right image }"
        "{ o output   | tvl1_output.jpg | specify output save path }"
        "{ c camera   | 0               | enable camera capturing }"
        "{ m cpu_mode | false           | run without OpenCL }"
        "{ v video    |                 | use video as input }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.has("help"))
    {
        cout << "Usage: pyrlk_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string fname0 = cmd.get<string>("l");
    string fname1 = cmd.get<string>("r");
    string vdofile = cmd.get<string>("v");
    string outpath = cmd.get<string>("o");
    bool useCPU = cmd.get<bool>("s");
    bool useCamera = cmd.get<bool>("c");
    int inputName = cmd.get<int>("c");

    UMat frame0, frame1;
    imread(fname0, cv::IMREAD_GRAYSCALE).copyTo(frame0);
    imread(fname1, cv::IMREAD_GRAYSCALE).copyTo(frame1);
    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();

    UMat flow;
    Mat show_flow;
    vector<UMat> flow_vec;
    if (frame0.empty() || frame1.empty())
        useCamera = true;

    if (useCamera)
    {
        VideoCapture capture;
        UMat frame, frameCopy;
        UMat frame0Gray, frame1Gray;
        UMat ptr0, ptr1;

        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());

        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << inputName << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" <<endl;
            goto nocamera;
        }

        cout << "In capture ..." << endl;
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;

            if (i == 0)
            {
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }

                alg->calc(ptr0, ptr1, flow);
                split(flow, flow_vec);

                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("tvl1 optical flow field", show_flow);
            }

            char key = (char)waitKey(10);
            if (key == 27)
                break;
            else if (key == 'm' || key == 'M')
            {
                ocl::setUseOpenCL(!cv::ocl::useOpenCL());
                cout << "Switched to " << (ocl::useOpenCL() ? "OpenCL" : "CPU") << " mode\n";
            }
        }

        capture.release();
    }
    else
    {
nocamera:
        if (cmd.has("cpu_mode"))
        {
            ocl::setUseOpenCL(false);
            std::cout << "OpenCL was disabled" << std::endl;
        }
        for(int i = 0; i <= LOOP_NUM; i ++)
        {
            cout << "loop" << i << endl;

            if (i > 0) workBegin();

            alg->calc(frame0, frame1, flow);
            split(flow, flow_vec);

            if (i > 0 && i <= LOOP_NUM)
                workEnd();

            if (i == LOOP_NUM)
            {
                if (useCPU)
                    cout << "average CPU time (noCamera) : ";
                else
                    cout << "average GPU time (noCamera) : ";
                cout << getTime() / LOOP_NUM << " ms" << endl;

                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("PyrLK [Sparse]", show_flow);
                imwrite(outpath, show_flow);
            }
        }
    }

    waitKey();

    return EXIT_SUCCESS;
}
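getFlowField, workBegin/workEnd, getTime, and LOOP_NUM are helpers defined elsewhere in the sample. A sketch of one common way to implement getFlowField, mapping flow direction to hue and magnitude to brightness; the exact visualization used by the original is an assumption:

// Sketch of a flow visualizer matching getFlowField(u, v, dst) above.
static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
    Mat magnitude, angle;
    cartToPolar(u, v, magnitude, angle, true);  // angle in degrees, 0..360
    normalize(magnitude, magnitude, 0, 1, NORM_MINMAX);
    Mat hsv[3] = { angle, Mat::ones(u.size(), CV_32F), magnitude };
    Mat merged, bgr;
    merge(hsv, 3, merged);                      // float HSV: H 0..360, S,V 0..1
    cvtColor(merged, bgr, COLOR_HSV2BGR);
    bgr.convertTo(flowField, CV_8UC3, 255.0);   // scale to displayable 8-bit BGR
}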
Example #12
int mainD2()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mask;
	IplImage *iplframe;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog;
    int fps=cap.get(CV_CAP_PROP_FPS);
    if(fps<=0)
        fps=10;
    else
        fps=1000/fps;
	cap>>frame;
	//mask=Mat::zeros(frame.size(),CV_8UC1);
	/*Mat cmask;
	Canny(frame,cmask,calpha,cbeta);*/
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than a median blur but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		//Canny(foreground,edges,calpha,cbeta);
		//canny=edges.clone();
		Rect rect = Rect(42, 42, 435, 205);
		//edges = edges(rect);
		//canny = canny(rect);
		//fin=fin(rect);
		//foreground = foreground(rect);
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		int momsize=0;
		int minsize = 5;
		for( int i = 0; i< contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			if(contours[i].size()>minsize)
			{
				drawContours( drawing, contours, i, color, 3, 8, hierarchy, 0, Point());
				momsize++;
			}
		}
		
		Mat band=Mat::zeros(foreground.size(),CV_8UC1);
		//subtract(mask,cmask,mask);
		bitwise_and(canny,mask,mask);
		bitwise_and(canny,drawing,band);
		bitwise_or(mask,band,mask);
		/*Mat band2=Mat::zeros(band.size(), CV_8UC1);
		bitwise_and(canny,drawing,band2);
		bitwise_and(band,band2,band);*/
		
		//band.copyTo(mask);
		//bitwise_and(canny,mask,mask);
		/*
		/// Get the moments
		vector<Moments> mu(momsize);
		int j=0;
		for( int i = 0; i < contours.size(); i++ )
		{ 
			if(contours[i].size()<maxsize&&contours[i].size()>minsize)
				mu[j] = moments( contours[i], false ); 
				j++;
		}

		///  Get the mass centers:
		vector<Point2f> mc( momsize );
		for( int i = 0; i < momsize; i++ )
		{ 
			mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); 
		}
		//draw
		for( int i = 0; i < momsize; i++ )
		{ 
			circle(drawing,mc[i],10,Scalar(255,0,0),-1);
		}*/

		/*for(int i = 0;i<contours.size();i++)
		{
			printf("(%d;%d)\n",mc[i].x,mc[i].y);
		}*/
        //threshold(foreground,foreground,128,255,THRESH_BINARY);
        //medianBlur(foreground,foreground,9);//better quality but slower
        //erode(foreground,foreground,Mat());
		//dilate(foreground,foreground,Mat());
		//foreground |= edges;
		//erode(foreground,foreground,Mat());
		//Mat for2=foreground.clone(); 
		//bitwise_not ( for2, for2 );
		//Mat match = foreground.clone(); 
		
		/*vector<Vec3f> circles;
		HoughCircles(foreground, circles, CV_HOUGH_GRADIENT,2,10,100,30,2,20);
		for( size_t i = 0; i < circles.size(); i++ )
		{
			Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
			int radius = cvRound(circles[i][2]);
			// draw the circle center
			circle( image, center, 3, Scalar(0,255,0), -1, 8, 0 );
			// draw the circle outline
			circle( image, center, radius, Scalar(0,0,255), 3, 8, 0 );
		}*/

		
		/*double maxHist=0;
		double minHist=0;
		minMaxLoc(h_hist, &minHist, &maxHist, 0, 0);*/
		//printf("Max H: %d\n",pos);

        imshow( "Capture ",frame );
        //image.copyTo(foreground,foreground); //colors
        imshow("Foreground ",mask);
		imshow("Canny ",canny);
		imshow("Edges ",drawing);
        //char c = (char)waitKey(fps);
		char c = (char)waitKey(1); //faster
        if( c == 27 )   
            break;

    }
	return 0;
}
Ejemplo n.º 13
0
int mainD3()
{
    VideoCapture cap;
    //cap.open(0); 
    cap.open("pool.avi"); 
	//cap.open("vid1.mp4"); 
    if( !cap.isOpened() )
    {

        puts("***Could not initialize capturing...***\n");
        return 0;
    }
    namedWindow( "Capture ", CV_WINDOW_AUTOSIZE);
    namedWindow( "Foreground ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Edges ", CV_WINDOW_AUTOSIZE );
	namedWindow( "Canny ", CV_WINDOW_AUTOSIZE );
    Mat frame,foreground,image,edges,canny,mask;
	IplImage *iplframe;
	calpha=calpha_slider=166;
	cbeta=cbeta_slider=171;
    BackgroundSubtractorMOG2 mog;
    int fps=cap.get(CV_CAP_PROP_FPS);
    if(fps<=0)
        fps=10;
    else
        fps=1000/fps;
	cap>>frame;
	mask=Mat::zeros(frame.size(),CV_8UC1);
	/*Mat cmask;
	Canny(frame,cmask,calpha,cbeta);*/
    for(;;)
    {
        cap>>frame;  
		for(int i=0;i<10;i++)
		{
			if(frame.empty())
				cap>>frame;
		}
        if( frame.empty() )
                break;
        image=frame.clone();
		GaussianBlur(image,image,Size(3, 3), 2, 2 );//faster than median blur, but lower quality
        mog(image,foreground,-1);
		createTrackbar( "A", "Capture ", &calpha_slider, calpha_slider_max, on_calpha_trackbar );
		createTrackbar( "B", "Capture ", &cbeta_slider, cbeta_slider_max, on_cbeta_trackbar );
		Canny(image,canny,calpha,cbeta);
		//Canny(foreground,edges,calpha,cbeta);
		//canny=edges.clone();
		Rect rect = Rect(42, 42, 435, 205);
		//edges = edges(rect);
		//canny = canny(rect);
		//fin=fin(rect);
		//foreground = foreground(rect);
		
		vector<vector<Point> > contours;
		vector<Vec4i> hierarchy;
		findContours(foreground,contours,hierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Mat drawing = Mat::zeros( foreground.size(), CV_8UC1 );
		int momsize=0;
		int minsize = 5;
		for( size_t i = 0; i < contours.size(); i++ )
		{
			Scalar color = Scalar( 255,255,255 );
			if((int)contours[i].size() > minsize)
			{
				drawContours( drawing, contours, (int)i, color, 3, 8, hierarchy, 0, Point());
				momsize++;
			}
		}
		vector<vector<Point> > ccontours;
		vector<Vec4i> chierarchy;
		findContours(canny,ccontours,chierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		Scalar color = Scalar( 255,255,255 );
		vector<vector<Point> > mcontours;
		vector<Vec4i> mhierarchy;
		findContours(mask,mcontours,mhierarchy,CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
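		//Overlap voting: for each mask contour, count how many of its boundary points
		//fall on the Canny edge map; the contour is kept only if more than half of its
		//points coincide with an edge.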
		for( size_t i = 0; i < mcontours.size(); i++ )
		{
			int in=0;
			int tot = (int)mcontours[i].size();
			for(int j=0;j<tot;j++)
			{
				Point p = mcontours[i][j];
				if(canny.at<uchar>(p.y,p.x) > 0)
					in++;
			}
			double f = in/double(tot);
			//this reuses the mask-contour index i to look up a Canny contour,
			//so guard against ccontours being shorter than mcontours
			if(i < ccontours.size() && f>0.5 && (int)ccontours[i].size()>minsize)
				drawContours( mask, ccontours, (int)i, color, 3, 8, chierarchy, 0, Point());
			//compare the Canny edges with the mask and reset the mask
		}
		Mat auxmask = Mat::zeros(foreground.size(),CV_8UC1);
		for( int i = 0; i< ccontours.size(); i++ )
		{
			int in=0;
			int tot = ccontours[i].size(); 
			for(int j=0;j<tot;j++)
			{
				Point p = ccontours[i][j];
				if(drawing.at<uchar>(p.y,p.x) > 0)
					in++;
			}
			double f = in/double(tot);
			if(f>0.5&&ccontours[i].size()>minsize)
				drawContours( auxmask, ccontours, i, color, 3, 8, hierarchy, 0, Point());
		}
		for( int i = 0; i< contours.size(); i++ )
		{
			int in=0;
			int tot = contours[i].size(); 
			for(int j=0;j<tot;j++)
			{
				Point p = contours[i][j];
				if(auxmask.at<uchar>(p.y,p.x) > 0)
					in++;
			}
			double f = in/double(tot);
			if(f>0.1&&contours[i].size()>minsize)
				drawContours( mask, contours, i, color, 3, 8, hierarchy, 0, Point());
		}
		/*Mat band=Mat::zeros(foreground.size(),CV_8UC1);
		bitwise_and(canny,mask,mask);
		bitwise_and(canny,drawing,band);
		bitwise_or(mask,band,mask);*/
		/*Mat band2=Mat::zeros(band.size(), CV_8UC1);
		bitwise_and(canny,drawing,band2);
		bitwise_and(band,band2,band);*/
		
		//band.copyTo(mask);
		//bitwise_and(canny,mask,mask);
		
        imshow( "Capture ",frame );
        //image.copyTo(foreground,foreground); //colors
        imshow("Foreground ",mask);
		imshow("Canny ",canny);
		imshow("Edges ",drawing);
        //char c = (char)waitKey(fps);
		char c = (char)waitKey(1); //faster
        if( c == 27 )   
            break;

    }
	return 0;
}
Ejemplo n.º 14
0
int main( int argc, const char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    if (parser.has("help"))
    {
        help();
        return 0;
    }
    int camNum = parser.get<int>(0);
    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printMessage();
        return -1;
    }


    Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    VideoWriter videoStream;
    videoStream.open("./VirtualPiano.mp4", -1, cap.get(CV_CAP_PROP_FPS), S, true);
    if (!videoStream.isOpened())
    {
        cout  << "Could not open the output video." << endl;
        return -1;
    }
    cout << hot_keys;
    //namedWindow( "Histogram", 0 );
    namedWindow( "VirtualPiano", 0 );
    resizeWindow( "VirtualPiano", WINDOW_WIDTH, WINDOW_HEIGHT);
    setMouseCallback( "VirtualPiano", onMouse, 0 );
    //createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    //createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    //createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    RotatedRect trackBox;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);
        Mat flippedImage;
        flip(image, flippedImage, 1);
        image = flippedImage;
        if( !paused )
        {
            cvtColor(image, hsv, COLOR_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if( trackObject < 0 )
                {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, NORM_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, COLOR_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }

                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                trackBox = CamShift(backproj, trackWindow,
                                    TermCriteria( TermCriteria::EPS | TermCriteria::COUNT, 10, 1 ));
                if( trackWindow.area() <= 1 )
                {
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                if( backprojMode )
                    cvtColor( backproj, image, COLOR_GRAY2BGR );
                ellipse( image, trackBox, Scalar(0,0,255), 3, LINE_AA );

            }
        }
        else if( trackObject < 0 )
            paused = false;

        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }
        Size size = image.size();
        int thickness;
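    // Note: the flat index x*NOTES_IN_ROW + y used below can run out of bounds when
    // NOTES_IN_ROW > NOTES_IN_COLUMN; the conventional row-major index would be
    // y*NOTES_IN_ROW + x.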
        for(int x = 0; x < NOTES_IN_ROW; ++x){
            for(int y = 0; y < NOTES_IN_COLUMN; ++y){
                Rect rect(Point(x*size.width/NOTES_IN_ROW, y*size.height/NOTES_IN_COLUMN), Point( (x+1)*size.width/NOTES_IN_ROW,(y+1)*size.height/NOTES_IN_COLUMN));
                if ( rect.contains(trackBox.center) && trackObject){
                    thickness = -1;
                }
                else{
                    thickness = 1;
                }
                rectangle(image, rect, NOTE_COLORS[x*NOTES_IN_ROW + y], thickness, 8);
            }
        }

        imshow( "VirtualPiano", image); 
        videoStream.write( image);
        //imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 ){
            break;
        }
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }
    return 0;
}
Ejemplo n.º 15
0
int main( int argc, char** argv )
{
    Mat img_template_cpu = imread( argv[1],IMREAD_GRAYSCALE);
    gpu::GpuMat img_template;
    img_template.upload(img_template_cpu);

    //Detect keypoints and compute descriptors of the template
    gpu::SURF_GPU surf;
    gpu::GpuMat keypoints_template, descriptors_template;

    surf(img_template,gpu::GpuMat(),keypoints_template, descriptors_template);

    //Matcher variables
    gpu::BFMatcher_GPU matcher(NORM_L2);   

    //VideoCapture from the webcam
    gpu::GpuMat img_frame;
    gpu::GpuMat img_frame_gray;
    Mat img_frame_aux;
    VideoCapture cap;
    cap.open(0);
    if (!cap.isOpened()){
        cerr << "cannot open camera" << endl;
        return -1;
    }
    int nFrames = 0;
    uint64 totalTime = 0;
    //main loop
    for(;;){
        int64 start = getTickCount();
        cap >> img_frame_aux;
        if (img_frame_aux.empty())
            break;
        img_frame.upload(img_frame_aux);
        gpu::cvtColor(img_frame, img_frame_gray, CV_BGR2GRAY);

        //Step 1: Detect keypoints and compute descriptors
        gpu::GpuMat keypoints_frame, descriptors_frame;
        surf(img_frame_gray,gpu::GpuMat(),keypoints_frame, descriptors_frame);

        //Step 2: Match descriptors
        vector<vector<DMatch> >matches;
        matcher.knnMatch(descriptors_template,descriptors_frame,matches,2);

        //Step 3: Filter results
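        //Lowe's ratio test: keep the best match only when it is clearly closer than
        //the second-best match (distance ratio below ratioT)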
        vector<DMatch> good_matches;
        float ratioT = 0.7f;
        for(int i = 0; i < (int) matches.size(); i++)
        {
            //matches[i][1] is only valid when two neighbours were returned
            if(matches[i].size() == 2 && matches[i][0].distance < ratioT*matches[i][1].distance)
            {
                good_matches.push_back(matches[i][0]);
            }
        }
        // Step 4: Download results
        vector<KeyPoint> keypoints1, keypoints2;
        vector<float> descriptors1, descriptors2;
        surf.downloadKeypoints(keypoints_template, keypoints1);
        surf.downloadKeypoints(keypoints_frame, keypoints2);
        surf.downloadDescriptors(descriptors_template, descriptors1);
        surf.downloadDescriptors(descriptors_frame, descriptors2);

        //Draw the results
        Mat img_result_matches;
        drawMatches(img_template_cpu, keypoints1, img_frame_aux, keypoints2, good_matches, img_result_matches);
        imshow("Matching a template", img_result_matches);

        int64 time_elapsed = getTickCount() - start;
        double fps = getTickFrequency() / time_elapsed;
        totalTime += time_elapsed;
        nFrames++;
        cout << "FPS : " << fps << endl;

        int key = waitKey(30);
        if (key == 27)
            break;
    }
    double meanFps = (nFrames > 0) ? getTickFrequency() * nFrames / (double)totalTime : 0.0;
    cout << "Mean FPS: " << meanFps << endl;

    return 0;
}
Ejemplo n.º 16
0
int main(int argc, char* argv[]) {

	//Check that exactly two arguments are provided
	if (argc != 3) {
		cout
				<< "Requires two inputs (with paths): <video file> <template image>"
				<< endl;
		return -1;
	}
	cv::Mat template_img;
	cv::Mat result_mat2;
	cv::Mat debug_img2;

	template_img = cv::imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
	if (template_img.data == NULL) {
		printf("imread() failed...\n");
		return -1;
	}

	//initialize capture to read input video file
	VideoCapture cap;
	cap.open(argv[1]);

	if (!cap.isOpened())  // check if open
		return -1;

	//create window to show video

    namedWindow("RESULT", CV_WINDOW_AUTOSIZE);
	double w = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
	double h = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
	//cout << "Frame Size = " << w << "x" << h << endl;

	//get the frame size
	Size size(static_cast<int>(w), static_cast<int>(h));


	//Output File
	ofstream ofile ("objectDetected.txt");
	if(!ofile.is_open())
	{
	    cout<<"Could not open the file"<<endl;
	    return -1;
	}

	Mat frame; //Declare frame to grab video
	int frame_no = 0; //Declare variable to keep track of frame number

	while (1) {

		bool ifgrab = cap.grab();
		if (!ifgrab)
			break;
		bool ifwrite = cap.retrieve(frame);
		if (!ifwrite)
			break;
		cv::cvtColor(frame, debug_img2, CV_BGR2GRAY);



		int match_method = CV_TM_SQDIFF_NORMED; //CV_TM_CCORR_NORMED;
		cv::matchTemplate(debug_img2, template_img, result_mat2, match_method);

		//threshold(result_mat2,result_mat2,0.9,1.,CV_THRESH_TOZERO);
		//normalize(result_mat2, result_mat2, 0, 1, NORM_MINMAX, -1, Mat());

		double minVal;
		double maxVal;
		Point minLoc, maxLoc, matchLoc;

		//////////////////////////////////Handle Multiple Occurrences///////////////////////////////////////////////////
		Mat old_mat = result_mat2.clone();
		//Image Pyramid
		// Mat imgPyr;
		//pyrUp( template_img, imgPyr, Size( template_img.cols*2, template_img.rows*2 ) );
		//imshow("PYR",imgPyr);
		/*
		/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
		if (match_method == CV_TM_SQDIFF
				|| match_method == CV_TM_SQDIFF_NORMED) {
			result_mat2 = 1.0 - result_mat2;
		}
		// get the top 10 maximums...
		vector<Point> res;
		findMatch(result_mat2, res, 10);

		int i = 1;
		while (i < res.size()) {
			Point matchLoc = res.at(i);

			rectangle(frame, matchLoc,
						Point(matchLoc.x + template_img.cols,
								matchLoc.y + template_img.rows),
						CV_RGB(0, 0, 255), 3);
			ofile<<"Swiss Ball"<<frame_no<<" "<<matchLoc.x<<" "<<matchLoc.y<<" "<<(matchLoc.x + template_img.cols)<<" "<<(matchLoc.y + template_img.rows);
			ofile<<"\n";

			i++;

		}
		*/
		////////////////////////////////////////////Threshold the value and display//////////////////////////////////////////

		minMaxLoc(old_mat, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
		//cout<<minVal<<" "<<maxVal<<endl;
		if (match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED)
			matchLoc = minLoc;
		else
			matchLoc = maxLoc;

		if(minVal<0.15 && minVal>0)	{ //SQDIFF_NORMED: lower is better; 0.15 is an empirical acceptance threshold

			rectangle(frame, matchLoc,
					Point(matchLoc.x + template_img.cols,
							matchLoc.y + template_img.rows), CV_RGB(0, 0, 255),
					3);

		}

        imshow("RESULT", frame);
		//Object frame UpperLeftX UpperLeftY LowerRightX LowerRightY
		ofile<<"Swiss Ball "<<frame_no<<" "<<matchLoc.x<<" "<<matchLoc.y<<" "<<(matchLoc.x + template_img.cols)<<" "<<(matchLoc.y + template_img.rows);
		ofile<<"\n";
		frame_no++;

		//delay 30ms
		waitKey(30);
	}
	//Release the objects
	cap.release();

	return 0;
}
Ejemplo n.º 17
0
int main(){
	//set recording and startNewRecording initially to false.
	bool recording = false;
	bool startNewRecording = false;
	int inc=0;
	bool firstRun = true;
	//if motion is detected in the video feed, we will know to start recording.
	bool motionDetected = false;

	//pause and resume code (if needed)
	bool pause = false;
	//set debug mode and trackingenabled initially to false
	//these can be toggled using 'd' and 't'
	debugMode = false;
	trackingEnabled = true;
	//set up the matrices that we will need
	//the two frames we will be comparing
	Mat frame1,frame2;
	//their grayscale images (needed for absdiff() function)
	Mat grayImage1,grayImage2;
	//resulting difference image
	Mat differenceImage;
	//thresholded difference image (for use in findContours() function)
	Mat thresholdImage;
	//video capture object.
	VideoCapture capture;
	capture.open(0);
	VideoWriter oVideoWriter;//create videoWriter object, not initialized yet
	double dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
	double dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
	//set framesize for use with videoWriter
	Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));

	if(!capture.isOpened()){
		cout<<"ERROR ACQUIRING VIDEO FEED\n";
		getchar();
		return -1;
	}
	while(1){


		//read first frame
		capture.read(frame1);
		//convert frame1 to gray scale for frame differencing
		cv::cvtColor(frame1,grayImage1,COLOR_BGR2GRAY);
		//copy second frame
		capture.read(frame2);
		//convert frame2 to gray scale for frame differencing
		cv::cvtColor(frame2,grayImage2,COLOR_BGR2GRAY);
		//perform frame differencing with the sequential images. This will output an "intensity image"
		//do not confuse this with a threshold image, we will need to perform thresholding afterwards.
		cv::absdiff(grayImage1,grayImage2,differenceImage);
		//threshold intensity image at a given sensitivity value
		cv::threshold(differenceImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
		if(debugMode==true){
			//show the difference image and threshold image
			cv::imshow("Difference Image",differenceImage);
			cv::imshow("Threshold Image", thresholdImage);
		}else{
			//if not in debug mode, destroy the windows so we don't see them anymore
			cv::destroyWindow("Difference Image");
			cv::destroyWindow("Threshold Image");
		}
		//blur the image to get rid of the noise. This will output an intensity image
		cv::blur(thresholdImage,thresholdImage,cv::Size(BLUR_SIZE,BLUR_SIZE));
		//threshold again to obtain binary image from blur output
		cv::threshold(thresholdImage,thresholdImage,SENSITIVITY_VALUE,255,THRESH_BINARY);
		if(debugMode==true){
			//show the threshold image after it's been "blurred"

			imshow("Final Threshold Image",thresholdImage);

		}
		else {
			//if not in debug mode, destroy the windows so we don't see them anymore
			cv::destroyWindow("Final Threshold Image");
		}

		//if tracking enabled, search for Motion
		if(trackingEnabled){

			//check for motion in the video feed
			//the detectMotion function will return true if motion is detected, else it will return false.
			//set motionDetected boolean to the returned value.
			motionDetected = detectMotion(thresholdImage,frame1);

		}else{ 
			//reset our variables if tracking is disabled
			motionDetected = false;

		}

////////////**STEP 1**//////////////////////////////////////////////////////////////////////////////////////////////////////////////
		//draw time stamp to video in bottom left corner. We draw it before we write so that it is written on the video file.
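		//A minimal sketch of STEP 1 (assumes <ctime>; the format string is illustrative):
		//time_t now = time(0);
		//char timeText[64];
		//strftime(timeText, sizeof(timeText), "%Y-%m-%d %H:%M:%S", localtime(&now));
		//putText(frame1, timeText, Point(0, frameSize.height - 10), 2, 0.8, Scalar(0, 255, 255), 1);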


		//if we're in recording mode, write to file
		if(recording){

			//check if it's our first time running the program so that we don't create a new video file over and over again.
			//we use the same boolean check to create a new recording if we want.
			if(firstRun == true || startNewRecording == true){

//////////**STEP 3**///////////////////////////////////////////////////////////////////////////////////////////////////////////////
				//Create a unique filename for each video based on the date and time the recording has started
				string videoFileName = "D:/MyVideo"+intToString(inc)+".avi";

				cout << "File has been opened for writing: " << videoFileName<<endl;
				
				cout << "Frame Size = " << dWidth << "x" << dHeight << endl;

				oVideoWriter  = VideoWriter(videoFileName, CV_FOURCC('D', 'I', 'V', '3'), 20, frameSize, true);

				if ( !oVideoWriter.isOpened() ) 
				{
					cout << "ERROR: Failed to initialize video writing" << endl;
					getchar();
					return -1;
				}
				//reset our variables to false.
				firstRun = false;
				startNewRecording = false;


			}

			oVideoWriter.write(frame1);
			//show "REC" in top left corner in red
			//be sure to do this AFTER you write to the file so that "REC" doesn't show up on the recorded video file.
			//Cut and paste the following line above "oVideoWriter.write(frame1)" to see what I'm talking about.
			putText(frame1,"REC",Point(0,60),2,2,Scalar(0,0,255),2);


		}



		//check if motion is detected in the video feed.
		if(motionDetected){
			//show "MOTION DETECTED" in bottom left corner in green
			putText(frame1,"MOTION DETECTED",cv::Point(0,420),2,2,cv::Scalar(0,255,0));

//////////**STEP 2**///////////////////////////////////////////////////////////////////////////////////////////////////////////////
			//set recording to true since there is motion in the video feed.
			//else recording should be false.
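			//A minimal sketch of STEP 2 (one possible policy):
			//  recording = true;   //start or keep recording while motion is present
			//an "else recording = false;" branch after this if-block would stop
			//recording as soon as the motion ends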


		}
		//show our captured frame
		imshow("Frame1",frame1);

		//check to see if a button has been pressed.
		//the 30ms delay is necessary for proper operation of this program
		//if removed, frames will not have enough time to refresh and a blank image will appear.
		switch(waitKey(30)){

		case 27: //'esc' key has been pressed, exit program.
			return 0;
		case 116: //'t' has been pressed. this will toggle tracking (disabled for security cam)
			/*trackingEnabled = !trackingEnabled;
			if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
			else cout<<"Tracking enabled."<<endl;*/
			break;
		case 100: //'d' has been pressed. this will debug mode
			debugMode = !debugMode;
			if(debugMode == false) cout<<"Debug mode disabled."<<endl;
			else cout<<"Debug mode enabled."<<endl;
			break;
		case 112: //'p' has been pressed. this will pause/resume the code.
			pause = !pause;
			if(pause == true){
				cout<<"Code paused, press 'p' again to resume"<<endl;
				while (pause == true){
					//stay in this loop until 'p' is pressed again
					switch (waitKey()){
					//a switch statement inside a switch statement? Mind blown.
					case 112:
						//change pause back to false
						pause = false;
						cout<<"Code Resumed"<<endl;
						break;
					}
				}
			}
			break; //do not fall through into the 'r' case below

		case 114:
			//'r' has been pressed.
			//toggle recording mode
			recording =!recording;

			if (!recording)cout << "Recording Stopped" << endl;

			else cout << "Recording Started" << endl;

			break;

		case 110:
			//'n' has been pressed
			//start new video file
			startNewRecording = true;
			recording = true;
			cout << "New Recording Started" << endl;
			//increment video file name
			inc+=1;
			break; 

		}

	}

	return 0;

}
Ejemplo n.º 18
0
int main(int argc, char **argv)
{
    //Create a CMT object
    CMT cmt;

    //Initialization bounding box
    Rect rect;

    //Parse args
    int challenge_flag = 0;
    int loop_flag = 0;
    int verbose_flag = 0;
    int bbox_flag = 0;
    int skip_frames = 0;
    int skip_msecs = 0;
    int output_flag = 0;
    string input_path;
    string output_path;

    const int detector_cmd = 1000;
    const int descriptor_cmd = 1001;
    const int bbox_cmd = 1002;
    const int no_scale_cmd = 1003;
    const int with_rotation_cmd = 1004;
    const int skip_cmd = 1005;
    const int skip_msecs_cmd = 1006;
    const int output_file_cmd = 1007;

    struct option longopts[] =
    {
        //No-argument options
        {"challenge", no_argument, &challenge_flag, 1},
        {"loop", no_argument, &loop_flag, 1},
        {"verbose", no_argument, &verbose_flag, 1},
        {"no-scale", no_argument, 0, no_scale_cmd},
        {"with-rotation", no_argument, 0, with_rotation_cmd},
        //Argument options
        {"bbox", required_argument, 0, bbox_cmd},
        {"detector", required_argument, 0, detector_cmd},
        {"descriptor", required_argument, 0, descriptor_cmd},
        {"output-file", required_argument, 0, output_file_cmd},
        {"skip", required_argument, 0, skip_cmd},
        {"skip-msecs", required_argument, 0, skip_msecs_cmd},
        {0, 0, 0, 0}
    };
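    //Example invocation (a sketch; flag names come from the table above, file names are hypothetical):
    //  ./cmt --bbox 120,80,60,90 --skip 30 --output-file track.csv input.avi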

    int index = 0;
    int c;
    while((c = getopt_long(argc, argv, "v", longopts, &index)) != -1)
    {
        switch (c)
        {
            case 'v':
                verbose_flag = true;
                break;
            case bbox_cmd:
                {
                    //TODO: The following also accepts strings of the form %f,%f,%f,%fxyz...
                    string bbox_format = "%f,%f,%f,%f";
                    float x,y,w,h;
                    int ret = sscanf(optarg, bbox_format.c_str(), &x, &y, &w, &h);
                    if (ret != 4)
                    {
                        cerr << "bounding box must be given in format " << bbox_format << endl;
                        return 1;
                    }

                    bbox_flag = 1;
                    rect = Rect(x,y,w,h);
                }
                break;
            case detector_cmd:
                cmt.str_detector = optarg;
                break;
            case descriptor_cmd:
                cmt.str_descriptor = optarg;
                break;
            case output_file_cmd:
                output_path = optarg;
                output_flag = 1;
                break;
            case skip_cmd:
                {
                    int ret = sscanf(optarg, "%d", &skip_frames);
                    if (ret != 1)
                    {
                      skip_frames = 0;
                    }
                }
                break;
            case skip_msecs_cmd:
                {
                    int ret = sscanf(optarg, "%d", &skip_msecs);
                    if (ret != 1)
                    {
                      skip_msecs = 0;
                    }
                }
                break;
            case no_scale_cmd:
                cmt.consensus.estimate_scale = false;
                break;
            case with_rotation_cmd:
                cmt.consensus.estimate_rotation = true;
                break;
            case '?':
                return 1;
        }

    }

    // Can only skip frames or milliseconds, not both.
    if (skip_frames > 0 && skip_msecs > 0)
    {
      cerr << "You can only skip frames, or milliseconds, not both." << endl;
      return 1;
    }

    //One argument remains
    if (optind == argc - 1)
    {
        input_path = argv[optind];
    }

    else if (optind < argc - 1)
    {
        cerr << "Only one argument is allowed." << endl;
        return 1;
    }

    //Set up logging
    FILELog::ReportingLevel() = verbose_flag ? logDEBUG : logINFO;
    Output2FILE::Stream() = stdout; //Log to stdout

    //Challenge mode
    if (challenge_flag)
    {
        //Read list of images
        ifstream im_file("images.txt");
        vector<string> files;
        string line;
        while(getline(im_file, line ))
        {
            files.push_back(line);
        }

        //Read region
        ifstream region_file("region.txt");
        vector<float> coords = getNextLineAndSplitIntoFloats(region_file);

        if (coords.size() == 4) {
            rect = Rect(coords[0], coords[1], coords[2], coords[3]);
        }

        else if (coords.size() == 8)
        {
            //Split into x and y coordinates
            vector<float> xcoords;
            vector<float> ycoords;

            for (size_t i = 0; i < coords.size(); i++)
            {
                if (i % 2 == 0) xcoords.push_back(coords[i]);
                else ycoords.push_back(coords[i]);
            }

            float xmin = *min_element(xcoords.begin(), xcoords.end());
            float xmax = *max_element(xcoords.begin(), xcoords.end());
            float ymin = *min_element(ycoords.begin(), ycoords.end());
            float ymax = *max_element(ycoords.begin(), ycoords.end());

            rect = Rect(xmin, ymin, xmax-xmin, ymax-ymin);
            cout << "Found bounding box" << xmin << " " << ymin << " " <<  xmax-xmin << " " << ymax-ymin << endl;
        }

        else {
            cerr << "Invalid bounding box format" << endl;
            return 1;
        }

        //Read first image
        Mat im0 = imread(files[0]);
        Mat im0_gray;
        cvtColor(im0, im0_gray, CV_BGR2GRAY);

        //Initialize cmt
        cmt.initialize(im0_gray, rect);

        //Write init region to output file
        ofstream output_file("output.txt");
        output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;

        //Process images, write output to file
        for (size_t i = 1; i < files.size(); i++)
        {
            FILE_LOG(logINFO) << "Processing frame " << i << "/" << files.size();
            Mat im = imread(files[i]);
            Mat im_gray;
            cvtColor(im, im_gray, CV_BGR2GRAY);
            cmt.processFrame(im_gray);
            if (verbose_flag)
            {
                display(im, cmt);
            }
            rect = cmt.bb_rot.boundingRect();
            output_file << rect.x << ',' << rect.y << ',' << rect.width << ',' << rect.height << std::endl;
        }

        output_file.close();

        return 0;
    }

    //Normal mode

    //Create window
    namedWindow(WIN_NAME);

    VideoCapture cap;

    bool show_preview = true;

    //If no input was specified
    if (input_path.length() == 0)
    {
        cap.open(0); //Open default camera device
    }

    //Else open the video specified by input_path
    else
    {
        cap.open(input_path);

        if (skip_frames > 0)
        {
          cap.set(CV_CAP_PROP_POS_FRAMES, skip_frames);
        }

        if (skip_msecs > 0)
        {
          cap.set(CV_CAP_PROP_POS_MSEC, skip_msecs);

          // Now which frame are we on?
          skip_frames = (int) cap.get(CV_CAP_PROP_POS_FRAMES);
        }

        show_preview = false;
    }

    //If it doesn't work, stop
    if(!cap.isOpened())
    {
        cerr << "Unable to open video capture." << endl;
        return -1;
    }

    //Show preview until key is pressed
    while (show_preview)
    {
        Mat preview;
        cap >> preview;

        screenLog(preview, "Press a key to start selecting an object.");
        imshow(WIN_NAME, preview);

        char k = waitKey(10);
        if (k != -1) {
            show_preview = false;
        }
    }

    //Get initial image
    Mat im0;
    cap >> im0;

    //If no bounding was specified, get it from user
    if (!bbox_flag)
    {
        rect = getRect(im0, WIN_NAME);
    }

    FILE_LOG(logINFO) << "Using " << rect.x << "," << rect.y << "," << rect.width << "," << rect.height
        << " as initial bounding box.";

    //Convert im0 to grayscale
    Mat im0_gray;
    if (im0.channels() > 1) {
        cvtColor(im0, im0_gray, CV_BGR2GRAY);
    } else {
        im0_gray = im0;
    }

    //Initialize CMT
    cmt.initialize(im0_gray, rect);

    int frame = skip_frames;

    //Open output file.
    ofstream output_file;

    if (output_flag)
    {
        int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC);

        output_file.open(output_path.c_str());
        output_file << OUT_FILE_COL_HEADERS << endl;
        output_file << frame << "," << msecs << ",";
        output_file << cmt.points_active.size() << ",";
        output_file << write_rotated_rect(cmt.bb_rot) << endl;
    }

    //Main loop
    while (true)
    {
        frame++;

        Mat im;

        //If loop flag is set, reuse initial image (for debugging purposes)
        if (loop_flag) im0.copyTo(im);
        else cap >> im; //Else use next image in stream

        if (im.empty()) break; //Exit at end of video stream

        Mat im_gray;
        if (im.channels() > 1) {
            cvtColor(im, im_gray, CV_BGR2GRAY);
        } else {
            im_gray = im;
        }

        //Let CMT process the frame
        cmt.processFrame(im_gray);

        //Output.
        if (output_flag)
        {
            int msecs = (int) cap.get(CV_CAP_PROP_POS_MSEC);
            output_file << frame << "," << msecs << ",";
            output_file << cmt.points_active.size() << ",";
            output_file << write_rotated_rect(cmt.bb_rot) << endl;
        }
        else
        {
            //TODO: Provide meaningful output
            FILE_LOG(logINFO) << "#" << frame << " active: " << cmt.points_active.size();
            FILE_LOG(logINFO) << "confidence: " << cmt.confidence;
        }

        //Display image and then quit if requested.
        char key = display(im, cmt);
        if(key == 'q') break;
    }

    //Close output file.
    if (output_flag) output_file.close();

    return 0;
}
Ejemplo n.º 19
0
int main(int argc, char** argv){
  Mat image, imageTemp, oldImage;
  int width, height;
  VideoCapture cap;//capture object for the video frames
  vector<Mat> planes;//planes[0], planes[1] and planes[2] hold the color components (note: OpenCV stores BGR, so planes[0] is Blue)
  Mat histR, histG, histB;//histograms of the color components of the captured image
  int nbins = 64;//length of the vector used to store each histogram
  float range[] = {0, 256};
  const float *histrange = { range };
  bool uniform = true;
  bool accumulate = false;

  cap.open(0);//"abrindo a camera" para a captura

  if(!cap.isOpened()){
    cout << "cameras indisponiveis";
    return -1;
  }

  width  = cap.get(CV_CAP_PROP_FRAME_WIDTH);
  height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);

  cout << "largura = " << width << endl;
  cout << "altura  = " << height << endl;

  int histw = nbins, histh = nbins/2;
  Mat histImgR(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgG(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgB(histh, histw, CV_8UC3, Scalar(0,0,0));

  while(1){
    cap >> image;

    if (!cap.grab()) continue;//note: this grabs (and discards) an extra frame each iteration

    image.copyTo(imageTemp);
    Compare(image, oldImage);
    imageTemp.copyTo(oldImage);

    if(entrou){
      putText( image, "Stop!", Point(width/2, height/2), CV_FONT_HERSHEY_COMPLEX, 2,
           Scalar(255, 255, 0), 5, 0 );//Point takes (x,y), so use (width/2, height/2)
    }

    split (image, planes);
    //calcHist computes the histogram of each color plane
    calcHist(&planes[0], 1, 0, Mat(), histR, 1,
             &nbins, &histrange,
             uniform, accumulate);
    calcHist(&planes[1], 1, 0, Mat(), histG, 1,
             &nbins, &histrange,
             uniform, accumulate);
    calcHist(&planes[2], 1, 0, Mat(), histB, 1,
             &nbins, &histrange,
             uniform, accumulate);

    //Normalize each histogram between the minimum and maximum of its color component
    normalize(histR, histR, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
    normalize(histG, histG, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
    normalize(histB, histB, 0, histImgR.rows, NORM_MINMAX, -1, Mat());

    //reset the histogram images to black (0)
    histImgR.setTo(Scalar(0));
    histImgG.setTo(Scalar(0));
    histImgB.setTo(Scalar(0));

    //draw the histograms as lines with the line() function
    for(int i=0; i<nbins; i++){
      line(histImgR, Point(i, histh),
           Point(i, cvRound(histR.at<float>(i))),
           Scalar(0, 0, 255), 1, 8, 0);
      line(histImgG, Point(i, histh),
           Point(i, cvRound(histG.at<float>(i))),
           Scalar(0, 255, 0), 1, 8, 0);
      line(histImgB, Point(i, histh),
           Point(i, cvRound(histB.at<float>(i))),
           Scalar(255, 0, 0), 1, 8, 0);
    }

    //paste the histograms into the upper-left corner of the camera image
    histImgR.copyTo(image(Rect(0, 0       ,nbins, histh)));
    histImgG.copyTo(image(Rect(0, histh   ,nbins, histh)));
    histImgB.copyTo(image(Rect(0, 2*histh ,nbins, histh)));

    imshow("image", image);
    if(waitKey(30) >= 0) break;
  }
  return 0;
}
Ejemplo n.º 20
0
int main(int argc, const char** argv)
{
    //Tuio Parameters
    TuioServer *tuioServer;
    TuioTime currentTime;
    vector<TuioCursor*> cursors;
    help();
    
    //CommandLineParser parser(argc, argv, keys);
    bool useCamera=true;
    int camera=0;
    string file;
    if(argc == 3){
        if (strcmp(argv[1],"-c") == 0){
            camera=atoi(argv[2]);
        }else if (strcmp(argv[1],"-fn") == 0){
            useCamera=false;
            file=argv[2];
        }
    }
    VideoCapture cap;
    bool update_bg_model = true;
    int frames=0;
    
    if( useCamera )
        cap.open(camera);
    else
        cap.open(file.c_str());

    if( !cap.isOpened() )
    {
        printf("can not open camera or video file\n");
        return -1;
    }

    namedWindow("image", CV_WINDOW_KEEPRATIO);
    namedWindow("foreground mask", CV_WINDOW_KEEPRATIO);
    namedWindow("foreground image", CV_WINDOW_KEEPRATIO);
    namedWindow("outlines", CV_WINDOW_KEEPRATIO);

    BackgroundSubtractorMOG bg_model(100,25,0.5,10);
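    //OpenCV 2.4 constructor arguments: (history, nmixtures, backgroundRatio, noiseSigma)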
    //bg_model.set("noiseSigma", 10);
    
/*    SimpleBlobDetector::Params blobParams;
    blobParams.minDistBetweenBlobs = 10.0f;
    blobParams.filterByInertia = false;
    blobParams.filterByConvexity = false;
    blobParams.filterByColor = false;
    blobParams.filterByCircularity = false;
    blobParams.filterByArea = true;
    blobParams.minArea = 200.0f;
    blobParams.maxArea = 10000.0f;
    
    SimpleBlobDetector blobs(blobParams);
    vector<KeyPoint> blobKeypoints;
  */  
    vector<vector<Point> > contours,contoursAll;
    vector<Moments> contourMoments;    
    vector<Point2d> centers;
    

    Mat img, fgmask, fgmask_old, fgimg, temp, outline_img;
    tuioServer = new TuioServer("rb-mbp.local",3333);    
    //tuioServer = new TuioServer();    
    int niters=1;
    for(;;)
    {
        cap >> img;
        contours.clear();
        contoursAll.clear();
        contourMoments.clear();
        centers.clear();
        
        if( img.empty() )
            break;

        //cvtColor(_img, img, COLOR_BGR2GRAY);

        if( fgimg.empty() )
          fgimg.create(img.size(), img.type());

        if( outline_img.empty() )
          outline_img.create(img.size(), img.type());

        //update the model
        bg_model(img, fgmask, update_bg_model ? -1 : 0);


        //Fill in gaps in the mask using morphology functions        
        dilate(fgmask, temp, Mat(), Point(-1,-1), niters); 
        //erode(temp, temp, Mat(), Point(-1,-1), niters*2);
        //dilate(temp, temp, Mat(), Point(-1,-1), niters);
        threshold(temp,temp,200,255,CV_THRESH_BINARY);     
        temp.copyTo(fgmask);
        
        //detect movement.  Update bgimage when we have movement or every n frames
        /*
        if (fgmask_old.empty()){
            fgmask_old.create(fgmask.size(),fgmask.type());
        }
        if (sum(fgmask-fgmask_old)[0] > 1e6 || frames > 5){
            update_bg_model = true;
            frames = 0;
        }else {
            update_bg_model = false;
            frames++;
        }
        fgmask.copyTo(fgmask_old);
        */
    //    blobs.detect(fgmask,blobKeypoints);
    //    drawKeypoints(img,blobKeypoints,img);        
        /*
        **Find blobs using Contours
        **
        */
        fgimg = Scalar::all(0);        
        outline_img = Scalar::all(0); 
        findContours(temp,contoursAll,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE);                
        
        for (int contour=0;contour < contoursAll.size(); contour++){
            Moments m = moments(contoursAll[contour]);
            if (m.m00 < 100)
                continue;
            contours.push_back(contoursAll[contour]);
            centers.push_back(Point2d(m.m10 / m.m00, m.m01 / m.m00));
            contourMoments.push_back(m);
        }
        drawContours(outline_img,contours,-1,Scalar(255,255,255));
        currentTime = TuioTime::getSessionTime();
        tuioServer->initFrame(currentTime);
        
        list<TuioCursor*> cursorList = tuioServer->getTuioCursors();
        for(list<TuioCursor *>::iterator cursor=cursorList.begin();cursor != cursorList.end();++cursor){
            tuioServer->removeTuioCursor(*cursor);
        }
        tuioServer->stopUntouchedMovingCursors();
        tuioServer->commitFrame();

        
        currentTime = TuioTime::getSessionTime();
        tuioServer->initFrame(currentTime);        
        for (int contour=0;contour < contours.size(); contour++){
            circle(outline_img,centers[contour],4,Scalar(0,0,255),-1);
            cursors.push_back(tuioServer->addTuioCursor((float)(centers[contour].x)/800.0,(float)(centers[contour]).y/600.0));
        }
        tuioServer->commitFrame();        
        
        
        img.copyTo(fgimg, fgmask);
        
        //bg_model.getBackgroundImage(bgimg);
        imshow("image", img);
        imshow("foreground mask", fgmask);
        
        imshow("foreground image", fgimg);
        imshow("outlines",outline_img);
        
        //if(!bgimg.empty())
        //  imshow("mean background image", bgimg );
        
        char k = (char)waitKey(30);
        if( k == 27 ) break;
        if( k == ' ' )
        {
            update_bg_model = !update_bg_model;
            if(update_bg_model)
                printf("Background update is on\n");
            else
                printf("Background update is off\n");
        }
    }

    return 0;
}
Ejemplo n.º 21
0
int do_dense_track(char* video,int flag,FILE* info_file,FILE* traj_file,FILE* hog_file,FILE* hof_file,FILE* mbhx_file,FILE* mbhy_file){
	bool scale = false;
	VideoCapture capture;

	capture.open(video);
	if(!capture.isOpened()) {
		fprintf(stderr, "Could not initialize capturing..\n");
		return -1;
	}

	int frame_num = 0;
	TrackInfo trackInfo;
	DescInfo hogInfo, hofInfo, mbhInfo;

	InitTrackInfo(&trackInfo, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, false, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, true, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, false, patch_size, nxy_cell, nt_cell);

	SeqInfo seqInfo;
	InitSeqInfo(&seqInfo, video);

	if(flag)
		seqInfo.length = end_frame - start_frame + 1;

//	fprintf(stderr, "video size, length: %d, width: %d, height: %d\n", seqInfo.length, seqInfo.width, seqInfo.height);

	if(show_track == 1)
		namedWindow("DenseTrack", 0);

	Mat image, prev_grey, grey;

	std::vector<float> fscales(0);
	std::vector<Size> sizes(0);

	std::vector<Mat> prev_grey_pyr(0), grey_pyr(0), flow_pyr(0);
	std::vector<Mat> prev_poly_pyr(0), poly_pyr(0); // for optical flow

	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while(true) {
		Mat frame;
		int i;
		int c;

		// get a new frame
		capture >> frame;

		//namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display
		//imshow("farme frame",frame);
		//waitKey(0); 



		if(frame.empty())
			break;

		if(scale){
			std::cout<<"org img size = "<<frame.cols<<"x"<<frame.rows<<std::endl;
			Size size(0,0);
			double xyScale = sqrt(double(240.0*320.0)/(frame.cols*frame.rows));
			resize(frame,frame,size,xyScale,xyScale);
			std::cout<<"new img size = "<<frame.cols<<"x"<<frame.rows<<std::endl;
		}


		

        //printf("frame_num: %d\n",frame_num);

		if(frame_num < start_frame || frame_num > end_frame) {
			frame_num++;
			continue;
		}

		if(frame_num == start_frame) {
			image.create(frame.size(), CV_8UC3);
			grey.create(frame.size(), CV_8UC1);
			prev_grey.create(frame.size(), CV_8UC1);

			InitPry(frame, fscales, sizes);

			BuildPry(sizes, CV_8UC1, prev_grey_pyr);
			BuildPry(sizes, CV_8UC1, grey_pyr);

			BuildPry(sizes, CV_32FC2, flow_pyr);
			BuildPry(sizes, CV_32FC(5), prev_poly_pyr);
			BuildPry(sizes, CV_32FC(5), poly_pyr);

			xyScaleTracks.resize(scale_num);

			frame.copyTo(image);
			cvtColor(image, prev_grey, CV_BGR2GRAY);

			for(int iScale = 0; iScale < scale_num; iScale++) {
				if(iScale == 0)
					prev_grey.copyTo(prev_grey_pyr[0]);
				else
					resize(prev_grey_pyr[iScale-1], prev_grey_pyr[iScale], prev_grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

				// dense sampling feature points
				std::vector<Point2f> points(0);
				DenseSample(prev_grey_pyr[iScale], points, quality, min_distance);

				// save the feature points
				std::list<Track>& tracks = xyScaleTracks[iScale];
				for(i = 0; i < points.size(); i++)
					tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
			}

			// compute polynomial expansion
			my::FarnebackPolyExpPyr(prev_grey, prev_poly_pyr, fscales, 7, 1.5);

			frame_num++;
			continue;
		}

		init_counter++;
		frame.copyTo(image);
		cvtColor(image, grey, CV_BGR2GRAY);

		// compute optical flow for all scales once
		my::FarnebackPolyExpPyr(grey, poly_pyr, fscales, 7, 1.5);
		my::calcOpticalFlowFarneback(prev_poly_pyr, poly_pyr, flow_pyr, 10, 2);

		for(int iScale = 0; iScale < scale_num; iScale++) {
			if(iScale == 0)
				grey.copyTo(grey_pyr[0]);
			else
				resize(grey_pyr[iScale-1], grey_pyr[iScale], grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

			int width = grey_pyr[iScale].cols;
			int height = grey_pyr[iScale].rows;

			// compute the integral histograms
			DescMat* hogMat = InitDescMat(height+1, width+1, hogInfo.nBins);

			HogComp(prev_grey_pyr[iScale], hogMat->desc, hogInfo);

			DescMat* hofMat = InitDescMat(height+1, width+1, hofInfo.nBins);

			HofComp(flow_pyr[iScale], hofMat->desc, hofInfo);

			DescMat* mbhMatX = InitDescMat(height+1, width+1, mbhInfo.nBins);

			DescMat* mbhMatY = InitDescMat(height+1, width+1, mbhInfo.nBins);

			MbhComp(flow_pyr[iScale], mbhMatX->desc, mbhMatY->desc, mbhInfo);

			// track feature points in each scale separately
			std::list<Track>& tracks = xyScaleTracks[iScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end();) {
				int index = iTrack->index;
				Point2f prev_point = iTrack->point[index];
				int x = std::min<int>(std::max<int>(cvRound(prev_point.x), 0), width-1);
				int y = std::min<int>(std::max<int>(cvRound(prev_point.y), 0), height-1);

				Point2f point;
				point.x = prev_point.x + flow_pyr[iScale].ptr<float>(y)[2*x];
				point.y = prev_point.y + flow_pyr[iScale].ptr<float>(y)[2*x+1];
 
				if(point.x <= 0 || point.x >= width || point.y <= 0 || point.y >= height) {
					iTrack = tracks.erase(iTrack);
					continue;
				}

				// get the descriptors for the feature point
				RectInfo rect;
				GetRect(prev_point, rect, width, height, hogInfo);
				GetDesc(hogMat, rect, hogInfo, iTrack->hog, index);
				GetDesc(hofMat, rect, hofInfo, iTrack->hof, index);
				GetDesc(mbhMatX, rect, mbhInfo, iTrack->mbhX, index);
				GetDesc(mbhMatY, rect, mbhInfo, iTrack->mbhY, index);
				iTrack->addPoint(point);

				// draw the trajectories at the first scale
				if(show_track == 1 && iScale == 0)
					DrawTrack(iTrack->point, iTrack->index, fscales[iScale], image);

				// if the trajectory achieves the maximal length
				if(iTrack->index >= trackInfo.length) {
					std::vector<Point2f> trajectory(trackInfo.length+1);
					for(int i = 0; i <= trackInfo.length; ++i)
						trajectory[i] = iTrack->point[i]*fscales[iScale];
				
					float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
					if(IsValid(trajectory, mean_x, mean_y, var_x, var_y, length)) {


					/*

					The first 10 elements are information about the trajectory:

					frameNum:     The trajectory ends on which frame
					mean_x:       The mean value of the x coordinates of the trajectory
					mean_y:       The mean value of the y coordinates of the trajectory
					var_x:        The variance of the x coordinates of the trajectory
					var_y:        The variance of the y coordinates of the trajectory
					length:       The length of the trajectory
					scale:        The trajectory is computed on which scale
					x_pos:        The normalized x position w.r.t. the video (0~0.999), for spatio-temporal pyramid 
					y_pos:        The normalized y position w.r.t. the video (0~0.999), for spatio-temporal pyramid 
					t_pos:        The normalized t position w.r.t. the video (0~0.999), for spatio-temporal pyramid
					The following element are five descriptors concatenated one by one:

					Trajectory:    2x[trajectory length] (default 30 dimension) 
					HOG:           8x[spatial cells]x[spatial cells]x[temporal cells] (default 96 dimension)
					HOF:           9x[spatial cells]x[spatial cells]x[temporal cells] (default 108 dimension)
					MBHx:          8x[spatial cells]x[spatial cells]x[temporal cells] (default 96 dimension)
					MBHy:          8x[spatial cells]x[spatial cells]x[temporal cells] (default 96 dimension)

					*/


						fprintf(info_file, "%d\t%f\t%f\t%f\t%f\t%f\t%f\t", frame_num, mean_x, mean_y, var_x, var_y, length, fscales[iScale]);

						// for spatio-temporal pyramid
						fprintf(info_file,"%f\t", std::min<float>(std::max<float>(mean_x/float(seqInfo.width), 0), 0.999));
						fprintf(info_file,"%f\t", std::min<float>(std::max<float>(mean_y/float(seqInfo.height), 0), 0.999));
						fprintf(info_file,"%f\n", std::min<float>(std::max<float>((frame_num - trackInfo.length/2.0 - start_frame)/float(seqInfo.length), 0), 0.999));
					
						// output the trajectory
						for (int i = 0; i < trackInfo.length; ++i)
							fprintf(traj_file,"%f\t%f\t", trajectory[i].x,trajectory[i].y);
						fprintf(traj_file,"\n");

						PrintDesc(hog_file,iTrack->hog, hogInfo, trackInfo);
						PrintDesc(hof_file,iTrack->hof, hofInfo, trackInfo);
						PrintDesc(mbhx_file,iTrack->mbhX, mbhInfo, trackInfo);
						PrintDesc(mbhy_file,iTrack->mbhY, mbhInfo, trackInfo);
						//printf("frame_num: %d\n",frame_num);
					}

					iTrack = tracks.erase(iTrack);
					continue;
				}
				++iTrack;
			}
			ReleDescMat(hogMat);
			ReleDescMat(hofMat);
			ReleDescMat(mbhMatX);
			ReleDescMat(mbhMatY);

			if(init_counter != trackInfo.gap)
				continue;

			// detect new feature points every initGap frames
			std::vector<Point2f> points(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++)
				points.push_back(iTrack->point[iTrack->index]);

			DenseSample(grey_pyr[iScale], points, quality, min_distance);
			// save the new feature points
			for(i = 0; i < points.size(); i++)
				tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
		}

		init_counter = 0;
		grey.copyTo(prev_grey);
		for(i = 0; i < scale_num; i++) {
			grey_pyr[i].copyTo(prev_grey_pyr[i]);
			poly_pyr[i].copyTo(prev_poly_pyr[i]);
		}

		frame_num++;

		if( show_track == 1 ) {
			imshow( "DenseTrack", image);
			c = cvWaitKey(3);
			if((char)c == 27) break;
		}
	}

	if( show_track == 1 )
		destroyWindow("DenseTrack");

	return 0;
}
Ejemplo n.º 22
0
int main(int argc, const char** argv)
{
    help();

    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    int camNum = parser.get<int>("c");
    vmin = parser.get<int>("vmin");
    vmax = parser.get<int>("vmax");
    smin = parser.get<int>("smin");
    bool startPlayback = parser.get<bool>("p");
    string file = parser.get<string>("1");

    if (file.empty()) {
        cout << "Using camera " << camNum << endl;
        cap.open(camNum);
    } else {
        cout << "Using file " << file << endl;
        cap.open(file.c_str());
    }

    if (!cap.isOpened()) {
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printParams();
        return -1;
    }

    namedWindow("Histogram", 0);
    namedWindow("CamShift Demo", 0);
    setMouseCallback("CamShift Demo", onMouse, 0);
    createTrackbar("Vmin", "CamShift Demo", &vmin, 256, 0);
    createTrackbar("Vmax", "CamShift Demo", &vmax, 256, 0);
    createTrackbar("Smin", "CamShift Demo", &smin, 256, 0);

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    deque<pair<int, Point2f> > points;
    bool paused = !startPlayback;
    cap >> frame;
    if (!frame.empty())
        frame.copyTo(image);
    set_selection(parser.get<string>("rect"));
    int frameCount = 0;
    int eCount = 0;
    Stats stats;
    Stats stats_cls;

    for (; !frame.empty();) {

        if (!paused) {
            frame.copyTo(image);
            cvtColor(image, hsv, CV_BGR2HSV);

            if (trackObject) {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if (trackObject < 0) {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, CV_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for (int i = 0; i < hsize; ++i)
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, CV_HSV2BGR);

                    for (int i = 0; i < hsize; ++i) {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle(histimg, Point(i*binW,histimg.rows),
                                  Point((i+1)*binW,histimg.rows - val),
                                  Scalar(buf.at<Vec3b>(i)), -1, 8);
                    }
                }

                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                                                TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1));
                // draw the last ten tracked locations
                Point2f pts[4];
                trackBox.points(pts);
                Point2f center = (pts[0] + pts[2]) * 0.5;
                points.push_back(make_pair(frameCount, center));
                if (points.size() > Stats::PREDCOUNT)
                    points.pop_front();
                int alpha = 255;
                for (deque<pair<int, Point2f> >::const_reverse_iterator it = points.rbegin(); it != points.rend(); ++it) {
                    circle(image, it->second, 4, Scalar(255,0,0,alpha), 2);
                    alpha *= 0.85;
                }
                stats.print_stats(points); // does nothing with < 10 points
                stats_cls.print_stats(points, false); // does nothing with < 10 points
                // predict the next positions
                if (points.size() >= 5) {
                    vector<int> frames;
                    vector<float> points_x;
                    vector<float> points_y;
                    for (deque<pair<int, Point2f> >::const_iterator it = points.begin(); it != points.end(); ++it) {
                        frames.push_back(it->first);
                        points_x.push_back(it->second.x);
                        points_y.push_back(it->second.y);
                    }
                    Spline<int, float> spl_x(frames, points_x);
                    Spline<int, float> spl_y(frames, points_y);
                    LSFit<3, int, float> cls_x(frames, points_x);
                    LSFit<3, int, float> cls_y(frames, points_y);
                    LSFit<3, int, float> clsw_x(frames, points_x, true);
                    LSFit<3, int, float> clsw_y(frames, points_y, true);
                    vector<float> curpred; // stats
                    vector<float> curpred_cls; // stats
                    // draw the next ten predicted points
                    for (int i = 1; i <= Stats::PREDCOUNT; ++i) {
                        float x = spl_x[frameCount + i];
                        float y = spl_y[frameCount + i];
                        float c_x = cls_x[frameCount + i];
                        float c_y = cls_y[frameCount + i];
                        float cw_x = clsw_x[frameCount + i];
                        float cw_y = clsw_y[frameCount + i];
                        curpred.push_back(sqrt(x*x+y*y)); // stats
                        curpred_cls.push_back(sqrt(c_x*c_x+c_y*c_y)); // stats
                        Point2f p(x, y);
                        circle(image, p, 4, Scalar(0,255,0), 2);
                        Point2f c_p(c_x, c_y);
                        circle(image, c_p, 4, Scalar(255,255,255), 2);
                        Point2f cw_p(cw_x, cw_y);
                        circle(image, cw_p, 4, Scalar(255,200,200), 2);
                    }
                    stats.add_pred(curpred);
                    stats_cls.add_pred(curpred_cls);
                }
                if (trackWindow.area() <= 1) {
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    // Rect takes (x, y, width, height), so rebuild a 2r x 2r window
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       r * 2, r * 2) &
                                  Rect(0, 0, cols, rows);
                }

                if (backprojMode)
                    cvtColor(backproj, image, CV_GRAY2BGR);
                ellipse(image, trackBox, Scalar(0,0,255), 3, CV_AA);
            }
        }

        if ((selectObject || frameCount == 0) && selection.width > 0 && selection.height > 0) {
            // causes flickering since it re-inverts itself every frame
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow("CamShift Demo", image);
        if (showHist)
            imshow("Histogram", histimg);

        char c = (char)waitKey(10);
        if (c == 27)
            break;
        switch (c) {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if (!showHist)
                destroyWindow("Histogram");
            else
                namedWindow("Histogram", 1);
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }

        if (!paused) {
            cap >> frame;
            ++frameCount;
        }
    }

    return 0;
}
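The Spline and LSFit classes above are the author's own; their implementations are not shown. As a rough illustration of what a degree-3 least-squares fit plus extrapolation involves, here is a minimal assumption-based stand-in using cv::solve (signature and details are assumptions, not the author's code):

float polyPredict(const std::vector<int>& frames,
                  const std::vector<float>& values, int queryFrame)
{
	const int degree = 3;
	cv::Mat A((int)frames.size(), degree + 1, CV_32F);
	cv::Mat b((int)frames.size(), 1, CV_32F);
	for (int r = 0; r < (int)frames.size(); r++) {
		float t = (float)frames[r], p = 1.f;
		for (int c = 0; c <= degree; c++) { A.at<float>(r, c) = p; p *= t; }
		b.at<float>(r, 0) = values[r];
	}
	cv::Mat coeffs;
	cv::solve(A, b, coeffs, cv::DECOMP_SVD);  // least squares: minimizes ||A*coeffs - b||
	// evaluate the fitted polynomial at the query frame
	float result = 0.f, p = 1.f;
	for (int c = 0; c <= degree; c++) {
		result += coeffs.at<float>(c, 0) * p;
		p *= (float)queryFrame;
	}
	return result;
}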
Ejemplo n.º 23
0
void RecognitionDemos( Mat& full_image, Mat& template1, Mat& template2, Mat& template1locations, Mat& template2locations, VideoCapture& bicycle_video, Mat& bicycle_background, Mat& bicycle_model, VideoCapture& people_video, CascadeClassifier& cascade, Mat& numbers, Mat& good_orings, Mat& bad_orings, Mat& unknown_orings )
{
	Timestamper* timer = new Timestamper();

	// Principal Components Analysis
	PCASimpleExample();
    char ch = cvWaitKey();
	cvDestroyAllWindows();

	PCAFaceRecognition();
    ch = cvWaitKey();
	cvDestroyAllWindows();

	// Statistical Pattern Recognition
	Mat gray_numbers,binary_numbers;
	cvtColor(numbers, gray_numbers, CV_BGR2GRAY);
	threshold(gray_numbers,binary_numbers,128,255,THRESH_BINARY_INV);
    vector<vector<Point>> contours;
	vector<Vec4i> hierarchy;
	findContours(binary_numbers,contours,hierarchy,CV_RETR_TREE,CV_CHAIN_APPROX_NONE);
	Mat contours_image = Mat::zeros(binary_numbers.size(), CV_8UC3);
	contours_image = Scalar(255,255,255);
	// Do some processing on all contours (objects and holes!)
	vector<RotatedRect> min_bounding_rectangle(contours.size());
	vector<vector<Point>> hulls(contours.size());
	vector<vector<int>> hull_indices(contours.size());
	vector<vector<Vec4i>> convexity_defects(contours.size());
	vector<Moments> contour_moments(contours.size());
	for (int contour_number=0; (contour_number<(int)contours.size()); contour_number++)
	{
		if (contours[contour_number].size() > 10)
		{
			min_bounding_rectangle[contour_number] = minAreaRect(contours[contour_number]);
			convexHull(contours[contour_number], hulls[contour_number]);
			convexHull(contours[contour_number], hull_indices[contour_number]);
			convexityDefects( contours[contour_number], hull_indices[contour_number], convexity_defects[contour_number]);
			contour_moments[contour_number] = moments( contours[contour_number] );
		}
	}
	for (int contour_number=0; (contour_number>=0); contour_number=hierarchy[contour_number][0])
	{
		if (contours[contour_number].size() > 10)
		{
        Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
        drawContours( contours_image, contours, contour_number, colour, CV_FILLED, 8, hierarchy );
		char output[500];
		double area = contourArea(contours[contour_number])+contours[contour_number].size()/2+1;
		// Process any holes (removing their area from the area of the enclosing contour)
		for (int hole_number=hierarchy[contour_number][2]; (hole_number>=0); hole_number=hierarchy[hole_number][0])
		{
			area -= (contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Scalar colour( rand()&0x7F, rand()&0x7F, rand()&0x7F );
 			drawContours( contours_image, contours, hole_number, colour, CV_FILLED, 8, hierarchy );
			sprintf(output,"Area=%.0f", contourArea(contours[hole_number])-contours[hole_number].size()/2+1);
			Point location( contours[hole_number][0].x +20, contours[hole_number][0].y +5 );
			putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
		// Draw the minimum bounding rectangle
		Point2f bounding_rect_points[4];
		min_bounding_rectangle[contour_number].points(bounding_rect_points);
		line( contours_image, bounding_rect_points[0], bounding_rect_points[1], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[1], bounding_rect_points[2], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[2], bounding_rect_points[3], Scalar(0, 0, 127));
		line( contours_image, bounding_rect_points[3], bounding_rect_points[0], Scalar(0, 0, 127));
		float bounding_rectangle_area = min_bounding_rectangle[contour_number].size.area();
		// Draw the convex hull
        drawContours( contours_image, hulls, contour_number, Scalar(127,0,127) );
		// Highlight any convexities
		int largest_convexity_depth=0;
		for (int convexity_index=0; convexity_index < (int)convexity_defects[contour_number].size(); convexity_index++)
		{
			if (convexity_defects[contour_number][convexity_index][3] > largest_convexity_depth)
				largest_convexity_depth = convexity_defects[contour_number][convexity_index][3];
			if (convexity_defects[contour_number][convexity_index][3] > 256*2)
			{
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][0]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
				line( contours_image, contours[contour_number][convexity_defects[contour_number][convexity_index][1]], contours[contour_number][convexity_defects[contour_number][convexity_index][2]], Scalar(0,0, 255));
			}
		}
		double hu_moments[7];
		HuMoments( contour_moments[contour_number], hu_moments );
		sprintf(output,"Perimeter=%d, Area=%.0f, BArea=%.0f, CArea=%.0f", contours[contour_number].size(),area,min_bounding_rectangle[contour_number].size.area(),contourArea(hulls[contour_number]));
		Point location( contours[contour_number][0].x, contours[contour_number][0].y-3 );
		putText( contours_image, output, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		sprintf(output,"HuMoments = %.2f, %.2f, %.2f", hu_moments[0],hu_moments[1],hu_moments[2]);
		Point location2( contours[contour_number][0].x+100, contours[contour_number][0].y-3+15 );
		putText( contours_image, output, location2, FONT_HERSHEY_SIMPLEX, 0.4, colour );
		}
	}
	imshow("Shape Statistics", contours_image );
	char c = cvWaitKey();
	cvDestroyAllWindows();

	// Support Vector Machine
	imshow("Good - original",good_orings);
	imshow("Defective - original",bad_orings);
	imshow("Unknown - original",unknown_orings);
	SupportVectorMachineDemo(good_orings,"Good",bad_orings,"Defective",unknown_orings);
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Template Matching
	Mat display_image, correlation_image;
	full_image.copyTo( display_image );
	double min_correlation, max_correlation;
	Mat matched_template_map;
	int result_columns =  full_image.cols - template1.cols + 1;
	int result_rows = full_image.rows - template1.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );  // Mat::create takes (rows, cols)
	timer->reset();
	double before_tick_count = static_cast<double>(getTickCount());
	matchTemplate( full_image, template1, correlation_image, CV_TM_CCORR_NORMED );
	double after_tick_count = static_cast<double>(getTickCount());
	double duration_in_ms = 1000.0*(after_tick_count-before_tick_count)/getTickFrequency();
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (1)");
	Mat matched_template_display1;
	cvtColor(matched_template_map, matched_template_display1, CV_GRAY2BGR);
	Mat correlation_window1 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template1, Scalar(0,0,255) );
	double precision, recall, accuracy, specificity, f1;
	Mat template1locations_gray;
	cvtColor(template1locations, template1locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template1locations_gray, precision, recall, accuracy, specificity, f1 );
	char results[400];
	Scalar colour( 255, 255, 255);
	sprintf( results, "precision=%.2f", precision);
	Point location( 7, 213 );
	putText( display_image, "Results (1)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
  
	result_columns =  full_image.cols - template2.cols + 1;
	result_rows = full_image.rows - template2.rows + 1;
	correlation_image.create( result_rows, result_columns, CV_32FC1 );
	timer->ignoreTimeSinceLastRecorded();
	matchTemplate( full_image, template2, correlation_image, CV_TM_CCORR_NORMED );
	minMaxLoc( correlation_image, &min_correlation, &max_correlation );
	FindLocalMaxima( correlation_image, matched_template_map, max_correlation*0.99 );
	timer->recordTime("Template Matching (2)");
	Mat matched_template_display2;
	cvtColor(matched_template_map, matched_template_display2, CV_GRAY2BGR);
	Mat correlation_window2 = convert_32bit_image_for_display( correlation_image, 0.0 );
	DrawMatchingTemplateRectangles( display_image, matched_template_map, template2, Scalar(0,0,255) );
	timer->putTimes(display_image);
	Mat template2locations_gray;
	cvtColor(template2locations, template2locations_gray, CV_BGR2GRAY);
	CompareRecognitionResults( matched_template_map, template2locations_gray, precision, recall, accuracy, specificity, f1 );
	sprintf( results, "precision=%.2f", precision);
	location.x = 123;
	location.y = 213;
	putText( display_image, "Results (2)", location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "recall=%.2f", recall);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "accuracy=%.2f", accuracy);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "specificity=%.2f", specificity);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	sprintf( results, "f1=%.2f", f1);
	location.y += 13;
	putText( display_image, results, location, FONT_HERSHEY_SIMPLEX, 0.4, colour );
	Mat correlation_display1, correlation_display2;
	cvtColor(correlation_window1, correlation_display1, CV_GRAY2BGR);
	cvtColor(correlation_window2, correlation_display2, CV_GRAY2BGR);

	Mat output1 = JoinImagesVertically(template1,"Template (1)",correlation_display1,"Correlation (1)",4);
	Mat output2 = JoinImagesVertically(output1,"",matched_template_display1,"Local maxima (1)",4);
	Mat output3 = JoinImagesVertically(template2,"Template (2)",correlation_display2,"Correlation (2)",4);
	Mat output4 = JoinImagesVertically(output3,"",matched_template_display2,"Local maxima (2)",4);
	Mat output5 = JoinImagesHorizontally( full_image, "Original Image", output2, "", 4 );
	Mat output6 = JoinImagesHorizontally( output5, "", output4, "", 4 );
	Mat output7 = JoinImagesHorizontally( output6, "", display_image, "", 4 );
	imshow( "Template matching result", output7 );
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Chamfer Matching
    Mat model_gray,model_edges,model_edges2;
	cvtColor(bicycle_model, model_gray, CV_BGR2GRAY);
	threshold(model_gray,model_edges,127,255,THRESH_BINARY);
	Mat current_frame;
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,400);  // Just in case the video has already been used.
	bicycle_video >> current_frame;
	bicycle_background = current_frame.clone();
	bicycle_video.set(CV_CAP_PROP_POS_FRAMES,500); 
	timer->reset();
	int count = 0;
	while (!current_frame.empty() && (count < 8))
    {
		Mat result_image = current_frame.clone();
		count++;
		Mat difference_frame, difference_gray, current_edges;
		absdiff(current_frame,bicycle_background,difference_frame);
		cvtColor(difference_frame, difference_gray, CV_BGR2GRAY);
		Canny(difference_frame, current_edges, 100, 200, 3);

		vector<vector<Point> > results;
		vector<float> costs;
		threshold(model_gray,model_edges,127,255,THRESH_BINARY);
		Mat matching_image, chamfer_image, local_minima;
		timer->ignoreTimeSinceLastRecorded();
		threshold(current_edges,current_edges,127,255,THRESH_BINARY_INV);
		distanceTransform( current_edges, chamfer_image, CV_DIST_L2 , 3);
		timer->recordTime("Chamfer Image");
		ChamferMatching( chamfer_image, model_edges, matching_image );
		timer->recordTime("Matching");
		FindLocalMinima( matching_image, local_minima, 500.0 );
		timer->recordTime("Find Minima");
		DrawMatchingTemplateRectangles( result_image, local_minima, model_edges, Scalar( 255, 0, 0 ) );
		Mat chamfer_display_image = convert_32bit_image_for_display( chamfer_image );
		Mat matching_display_image = convert_32bit_image_for_display( matching_image );
		//timer->putTimes(result_image);
		Mat current_edges_display, local_minima_display, model_edges_display, colour_matching_display_image, colour_chamfer_display_image;
		cvtColor(current_edges, current_edges_display, CV_GRAY2BGR);
		cvtColor(local_minima, local_minima_display, CV_GRAY2BGR);
		cvtColor(model_edges, model_edges_display, CV_GRAY2BGR);
		cvtColor(matching_display_image, colour_matching_display_image, CV_GRAY2BGR);
		cvtColor(chamfer_display_image, colour_chamfer_display_image, CV_GRAY2BGR);

		Mat output1 = JoinImagesVertically(current_frame,"Video Input",current_edges_display,"Edges from difference", 4);
		Mat output2 = JoinImagesVertically(output1,"",model_edges_display,"Model", 4);
		Mat output3 = JoinImagesVertically(bicycle_background,"Static Background",colour_chamfer_display_image,"Chamfer image", 4);
		Mat output4 = JoinImagesVertically(output3,"",colour_matching_display_image,"Degree of fit", 4);
		Mat output5 = JoinImagesVertically(difference_frame,"Difference",result_image,"Result", 4);
		Mat output6 = JoinImagesVertically(output5,"",local_minima_display,"Local minima", 4);
		Mat output7 = JoinImagesHorizontally( output2, "", output4, "", 4 );
		Mat output8 = JoinImagesHorizontally( output7, "", output6, "", 4 );
		imshow("Chamfer matching", output8);
		c = waitKey(1000);  // This makes the image appear on screen
		bicycle_video >> current_frame;
	}
	c = cvWaitKey();
	cvDestroyAllWindows();

	// Cascade of Haar classifiers (most often shown for face detection).
    VideoCapture camera;
	camera.open(1);
	camera.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	camera.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    if( camera.isOpened() )
	{
		timer->reset();
		Mat current_frame;
		do {
			camera >> current_frame;
			if( current_frame.empty() )
				break;
			vector<Rect> faces;
			timer->ignoreTimeSinceLastRecorded();
			Mat gray;
			cvtColor( current_frame, gray, CV_BGR2GRAY );
			equalizeHist( gray, gray );
			cascade.detectMultiScale( gray, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
			timer->recordTime("Haar Classifier");
			for( int count = 0; count < (int)faces.size(); count++ )
				rectangle(current_frame, faces[count], cv::Scalar(255,0,0), 2);
			//timer->putTimes(current_frame);
			imshow( "Cascade of Haar Classifiers", current_frame );
			c = waitKey(10);  // This makes the image appear on screen
        } while (c == -1);
	}
	delete timer;
}
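ChamferMatching in the section above is the author's helper. Since its score at each offset is just the sum of chamfer distances under the model's edge pixels, a plain cross-correlation against the distance image can stand in for it; this is an assumption-based equivalent, not the author's implementation:

void ChamferMatchingSketch(const Mat& chamfer_image, const Mat& model_edges,
                           Mat& matching_image)
{
	// edge pixels (255) become weight 1.0, background stays 0
	Mat model_float;
	model_edges.convertTo(model_float, CV_32F, 1.0/255.0);
	// each output pixel = sum of chamfer distances under the model's edges;
	// low values mean a good fit, which is why the caller looks for local minima
	matchTemplate(chamfer_image, model_float, matching_image, CV_TM_CCORR);
}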
Ejemplo n.º 24
0
int main(int argc, char* argv[])
{


	VideoCapture cap;
	Mat result, frame,gray_bg, frame_gray, image,silh1;
	cap.open("video_finale_Mithilesh_0Deg_input.mov");
	cap >> frame;
	frame.copyTo(image);
	// Video Write 
	
	VideoWriter outputVideo,  outputVideo1, outputVideo2, outputVideo3;       
	int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC)); 
	Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
		(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

	int count = 0;
	//Create a new window.
	cvNamedWindow("My Window", CV_WINDOW_AUTOSIZE);

	Mat fgimg, fgmask;
	Mat Mean   = Mat::zeros(frame.rows, frame.cols,CV_32FC3); 

	Mat bgimg, mean_rings;

	vector<Mat> image_array;


	for(;;)
	{
		cap >> frame;
		if (frame.empty())
			break;
		frame.copyTo(image);


		if (count < 30)
		{
			image_array.push_back(image.clone()); // clone: copyTo above reuses one buffer across frames

			if( fgimg.empty() )
				fgimg.create(image.size(), image.type());

			//bg_model(image, fgmask, -1 /*update_bg_model ? -1 : 0*/);

			//fgimg = Scalar::all(0);
			//image.copyTo(fgimg, fgmask);

			//bg_model.getBackgroundImage(bgimg);

		}
		count++;
		if (count == 31)
		{
			int size_l = image_array.size();
			for (int i = 0; i < size_l; i++)
			{
				accumulate(image_array[i], Mean);
			}
			Mean = Mean / size_l;
			//Mat Mean_image = ;
			Mean.convertTo(Mean,CV_8U);
			//Normalize_Color(Mean, Mean);
			//imwrite("D:\\Videos\\mean_image.jpg", Mean);
			//imshow("mean",Mean);
			//if(!bgimg.empty())
				//imshow("mean background image", bgimg );
			//imwrite("D:\\Videos\\GMM_bgd_image.jpg", bgimg);
			//hist_image(Mean);
			Color_Segmentation(Mean, mean_rings);
		}

		if (count > 31)
		{
			Mat difference, difference_gray, tool_image, tool_image_gray;
			Normalize_Color(Mean, Mean);
			Normalize_Color(image, image);
			absdiff( Mean, image, difference ); // get difference between frames
			//string address = "D:\\Videos\\Tools\\img_" + stringcount + ".jpg";
			//cv::imwrite(address.toUtf8().constData(),FINAL_IM_VEC[i]);
			ostringstream convert;
			convert << "D:/Videos/Tool/img" << count << ".jpg";
			//cvSaveImage(convert.str().c_str(), difference);
			string filename = convert.str();
			//cvSaveImage(filename.c_str(), img2);
			//imwrite(filename.c_str(), difference);
			//imshow("Difference",difference);
			//hist_image(difference);
			//waitKey(10);


			cvtColor( difference, difference_gray, CV_BGR2GRAY ); // convert frame to grayscale
			
			
			//cvtColor( frame, frame_gray, CV_BGR2GRAY ); // convert frame to grayscale

			//absdiff( gray_bg, frame_gray, difference_gray1 ); // get difference between frames


			//blur( difference_gray1, difference_gray1, Size(3,3) );

			// Decision making on which difference to take
			//Scalar avg_diff = mean( difference_gray );
			//Scalar avg_diff1 = mean( difference_gray1 );
			Mat image_rings;
			Color_Segmentation(image, image_rings);

			Mat diff_ring, diff_ring_image;
			//cvtColor(mean_rings, mean_rings, CV_HSV2BGR);
			//cvtColor(image_rings, image_rings, CV_HSV2BGR);

			absdiff(mean_rings, image_rings, diff_ring);
			
			imshow("Diff_rings", diff_ring);
			//Mat difference_hsv;
			//cvtColor(diff_ring, difference_hsv, CV_HSV2BGR);
			
			difference &= ~diff_ring;
			//cvtColor(difference_hsv,difference,CV_HSV2BGR);
			imshow("Diff_", difference);
			
			vector<Mat>channel1;
			channel1.push_back(difference_gray);
			channel1.push_back(difference_gray);
			channel1.push_back(difference_gray);
			merge(channel1, diff_ring_image);
			
			//outputVideo1.write(difference); //Needed
			
			
			//cvtColor( tool_image, tool_image_gray, CV_BGR2GRAY );
			//difference_gray &= tool_image_gray;
			Mat canny_output;
			vector<vector<Point> > contours;
			vector<Vec4i> hierarchy;
			int thresh = 50;
			
			//cvFillHoles(difference_gray);
			
			/// Detect edges using Canny (it needs a single-channel image,
			/// so convert the masked difference to grayscale first)
			cvtColor( difference, difference_gray, CV_BGR2GRAY );
			Canny( difference_gray, canny_output, thresh, thresh*4, 3 );


			normalize(canny_output, canny_output, 0, 1, cv::NORM_MINMAX);
			Mat kernel = (Mat_<uchar>(3,3) << 0, 1, 0, 1, 1, 1, 0, 1, 0);
			Mat dst;
			dilate(canny_output, dst, kernel);
			dilate(dst, dst, kernel);

			normalize(dst, dst, 0, 255, cv::NORM_MINMAX);


			/// Find contours
			findContours( dst, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );


			// approximate contours
			std::vector<std::vector<cv::Point> > contours_poly( contours.size() );
			for( int i = 0; i < contours.size(); i++ ) {
				approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 5, true );
			}


			//Find the largest and second largest contour

			double largest_area = 0;
			double second_largest_area = 0;
			int largest_contour_index = 0;
			int sec_largest_contour_index = 0;
			Rect bounding_rect_1, bounding_rect_2;
			for( int i = 0; i< contours_poly.size(); i++ ) // iterate through each contour. 
			{
				double a = contourArea( contours_poly[i],false);	 
				double b = arcLength(contours_poly[i],false);
				if(a > largest_area)
				{
					// the previous largest becomes the second largest
					second_largest_area = largest_area;
					sec_largest_contour_index = largest_contour_index;
					bounding_rect_2 = bounding_rect_1;
					largest_area = a;
					largest_contour_index = i; // store the index of the largest contour
					bounding_rect_1 = boundingRect(contours_poly[i]); // bounding rectangle of the largest contour
				}
				else if(a > second_largest_area)
				{
					second_largest_area = a;
					sec_largest_contour_index = i;
					bounding_rect_2 = boundingRect(contours_poly[i]);
				}
			}

			// Draw a line along the bottom edge of whichever bounding box starts higher up
			if (bounding_rect_1.y < bounding_rect_2.y)
			{
				Point startpt(bounding_rect_1.x, bounding_rect_1.y + bounding_rect_1.height);
				Point endpt(bounding_rect_1.x + bounding_rect_1.width, bounding_rect_1.y + bounding_rect_1.height);
				line(image, startpt, endpt, Scalar(0,0,255), 5, 8, 0);
			}
			else
			{
				Point startpt(bounding_rect_2.x, bounding_rect_2.y + bounding_rect_2.height);
				Point endpt(bounding_rect_2.x + bounding_rect_2.width, bounding_rect_2.y + bounding_rect_2.height);
				line(image, startpt, endpt, Scalar(0,0,255), 5, 8, 0);
			}
			//rectangle(image, bounding_rect,  Scalar(0,255,255), 1, CV_AA ); 

			
			imshow("My Window", image); //Needed
			
			//outputVideo.write(image); //Needed
			
			Scalar color( 255,255,255);
			Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
			double min_val,max_val;
			Point min_loc, max_loc;
			drawContours( drawing, contours,largest_contour_index, color, CV_FILLED, 8, hierarchy ); // Draw the largest contour using previously stored index.
			drawContours( drawing, contours,sec_largest_contour_index, color, CV_FILLED, 8, hierarchy ); // Draw the second largest contour using previously stored index.		
			

			///////Fitting line
			//if(contours.size())
			//{
			//	vector<Point> aa= contours[largest_contour_index];
			//	Point tt;
			//	for (int i = 0; i < aa.size(); i++)
			//	{
			//		tt = aa[i];
			//	}
			//	vector<double> line;
			//	if(aa.size() > 20)
			//	fitLine(aa, line , CV_DIST_L2, 0, 0.01,0.01);
			//}
			//double minlinex, maxliney;
			//Point minpoint, maxpoint;
			//minMaxLoc(line, minlinex, maxliney, minpoint,  maxpoint, noArray());
			
			//fittingline(drawing);
			
			frame.copyTo(image);
			/// Show in a window
			namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
			
			imshow( "Contours", drawing );
			
			//outputVideo2.write(drawing); //Needed
			
			dst.copyTo(result);
			waitKey(10);
		}
	}


	return 0;
}
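The snippet above freezes its background model after averaging the first 30 frames. For reference, a running average via accumulateWeighted adapts to slow scene changes instead; this is an assumption-based alternative, not the author's method:

void updateBackground(const Mat& frame, Mat& background, double alpha)
{
	Mat frame_f;
	frame.convertTo(frame_f, CV_32FC3);
	if (background.empty())
		frame_f.copyTo(background);  // first frame seeds the model
	// background = (1 - alpha) * background + alpha * frame
	accumulateWeighted(frame_f, background, alpha);
}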
Ejemplo n.º 25
0
void read_options(int len, char** c,VideoCapture& capture,FileStorage &fs){
		for (int i=0;i<len;i++){
		if (strcmp(c[i],"-b")==0&&!fromCa){
			if (len>i+1){
				readBB(c[i+1]);
				gotBB = true;
			}
			else
				print_help(c);
		}
		if (strcmp(c[i],"-s")==0&&!fromCa){
			if (len>i+1){
				video = string(c[i+1]);
				capture.open(video);
				fromfile = true;
			}
			else
				print_help(c);

		}
		if (strcmp(c[i],"-p")==0){
			if (len>i+1){
				fs.open(c[i+1], FileStorage::READ);
			}
			else
				print_help(c);
		}
		if (strcmp(c[i],"-no_tl")==0){
			tl = false;
		}
		if (strcmp(c[i],"-r")==0){
			rep = true;
		}
		if(strcmp(c[i],"-im")==0){
			char *directory=c[i+1];
			imageList=EnumFiles(directory,&listCount);
			// copy the directory prefix (up to and including the first '\\')
			// from the first list entry
			char *temp=new char[260];
			for(int j=0;imageList[0][j]!=0;j++)
			{
				if(imageList[0][j]!='\\')
					temp[j]=imageList[0][j];
				else
				{
					temp[j]='\\';
					temp[j+1]=0;
					break;
				}
			}
			//listCount=listCount-2; // the first entry is "." and the second is ".."; they don't count
			isImage=true;
			fromfile = true;
			fromCa=false;
			temp=strcat(temp,"init.txt");
			if(strcmp(imageList[listCount-1],temp)==0)
			{
				listCount--;
				readBB(imageList[listCount]);
				gotBB = true;
			}
		}
	}
}
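The prefix-copying loop in read_options writes into a fixed-size buffer; the same extraction with std::string avoids the size limit entirely. A hedged tidy-up, keeping the snippet's behaviour of stopping at the first backslash:

std::string initFilePath(const char* firstListEntry)
{
	std::string s(firstListEntry);
	std::string::size_type slash = s.find('\\');
	// keep everything up to and including the first backslash (empty if none)
	std::string prefix = (slash == std::string::npos) ? std::string()
	                                                  : s.substr(0, slash + 1);
	return prefix + "init.txt";  // e.g. "imgs\\" becomes "imgs\\init.txt"
}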
Ejemplo n.º 26
0
int main(int argc, char* argv[])
{

    int redThreshold = 190;
    vector<Point> outline,drawing; //for the version that we are drawing (scaled and translated)
    Point outlineCenter;
    double outlineArea=0;
    
    // Empty results directory
    struct dirent *next_file;
    DIR *dir;
    char filepath[256];
    string folder;
    
    /* Empty results folder
    folder=dataDir+"results";
    dir=opendir(folder.c_str());
    while ( (next_file = readdir(dir)) )
    {
        // build the full path for each file in the folder
        sprintf(filepath, "%s/%s", folder.c_str(), next_file->d_name);
        if(!remove(filepath))
        {
            cout<<"Removed file "<<filepath<<endl;
        };
    }
    // Empty video folder
    folder=dataDir+"video";
    dir=opendir(folder.c_str());
    while ( (next_file = readdir(dir)) )
    {
        // build the full path for each file in the folder
        sprintf(filepath, "%s/%s", folder.c_str(), next_file->d_name);
        if(!remove(filepath))
        {
            cout<<"Removed file "<<filepath<<endl;
        };
    }*/

    
    // Create star outline
    
    string path = dataDir+"redStar.png";
    if(!FileExist(path)){
        cout << "File " << path << " does not exist" << endl;
        return 1;
    }
    Mat starImage = imread(path);
    Point starCenter;
    imwrite(dataDir+"test_view_before_bad.png", starImage);
    findLargestRedObject(starImage, starCenter, drawing, redThreshold);
    
    
    double scaleFactor = 1.0;
    Point2f translation(0,0);
    
    VideoCapture capture;
    char filename[256];
    capture.open(0);
    if(!capture.isOpened())
    {
        //int error = -1;
        return 1;
    }

   //Load face the cascades
   string face_cascade_name = "haarcascade_frontalface_alt.xml";
   CascadeClassifier face_cascade;
   if( !face_cascade.load( dataDir+face_cascade_name ) )
   {
       printf("--(!)Error loading\n"); return -1; 
   }

    //for tracking the ball
    //vector<Point> drawing; //to keep track of the original track
    //vector<Point> outline; //for the version that we are drawing (scaled and translated)
    double drawingArea;
    Point drawingCenter;
    bool bTracking = false;
    Mat view0;

    //for recording frames of video and results
    bool bRecordVideo = false;
    bool bRecordResults = false;
    int frameNumber=0;

    //create the window with a trackbar for the slider for the red threshold
    namedWindow( "Camera View", 1 );
    createTrackbar( "Red Threshold", "Camera View", &redThreshold, 255, onTrackbar );


    capture.read(view0);
    view0.copyTo(view);

    //bool blink = false;

    while( capture.isOpened() )
    {
        capture.read(view0);
        view0.copyTo(view);
        if(bRecordVideo)
        {
            sprintf(filename, "%svideo/video_%04d.jpg", dataDir.c_str(), frameNumber);
            imwrite(filename, view);
        }

        if(bTracking)
        {
            //We are currently tracking the red object, so draw the track so the user can see it
            trackRedObject(view, drawing, redThreshold);
            //draw the outline as drawn
            drawOutline(view, drawing);
        }
        else if(drawing.size() > 0)
        {
            //If we are not tracking, then detect the faces and draw the outline around the faces
            computeObjectAreaAndCenter(drawing, outlineArea, outlineCenter);

            std::vector<Rect> faces;
            detectFaces(view, face_cascade, faces);
            for(int F=0; F<(int)faces.size(); F++)
            {
                //calculate the center of the face returned by the face detector
                Point faceCenter (faces[F].x+faces[F].width/2, faces[F].y + faces[F].height/2);

                // copy the drawing into another vector so we can manipulate it
                outline = drawing;

                // translate and scale the drawing so that it surrounds the face
                translation=faceCenter-outlineCenter;
                translateOutline(outline, translation);
                scaleFactor=pow(faces[F].width,2)/outlineArea;
                scaleOutline(outline, scaleFactor);

                //draw the manipulated outline on the image
                drawOutline(view, outline);
            }
        }


        if(bRecordResults)
        {
            sprintf(filename, "%sresults/results_%04d.jpg", dataDir.c_str(), frameNumber);
            imwrite(filename, view);
        }



        imshow("Camera View", view);
        char key = waitKey(33);
        if(key == 'q')
        {
            break;
        }
        if(key == 't')
        {
            bTracking = !bTracking;

            //We are starting our tracking, so clear out what was in there before
            if(bTracking) 
            {
                drawing.clear();
            }
            else //We are all done tracking, so do a little post-processing
            {
                //get basic information about the drawing that the user drew
                computeObjectAreaAndCenter(drawing, drawingArea, drawingCenter);
            }
        }
        if(key == 'v')
        {
            bRecordVideo = !bRecordVideo;
        }
        if(key == 'r')
        {
            bRecordResults = !bRecordResults;
        }
        if(key == ' ')
        {
            bRecordResults = !bRecordResults;
            bRecordVideo = bRecordResults;
        }

        if(bRecordResults || bRecordVideo)
        {
            frameNumber++;
        }
    }

    return 0;
}
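scaleOutline and translateOutline are the author's helpers, so this is only an assumption-based sketch of scaling a point outline about a centre. Note also that the snippet computes scaleFactor as an area ratio (face width squared over outlineArea); if the helper scales coordinates linearly, the square root of that ratio would be the more natural factor:

void scaleOutlineSketch(std::vector<Point>& outline, Point center, double factor)
{
	for (size_t i = 0; i < outline.size(); i++) {
		// move each point away from (or towards) the centre by 'factor'
		outline[i].x = center.x + (int)((outline[i].x - center.x) * factor);
		outline[i].y = center.y + (int)((outline[i].y - center.y) * factor);
	}
}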
Ejemplo n.º 27
0
Archivo: ct.cpp Proyecto: melmoumni/CT
int main(int argc, char **argv){
	/**
	 * int : lifetime of tracker (number of frames)
	 * Point : initial position of the CMT
	 **/
 	vector< tuple<CMT*,int, Point> > cmt_list;
	CMT* cmt;
	CascadeClassifier logo_cascade;
	String logo_cascade_name;
	VideoCapture capture;
	Mat frame;
	const int nb_frame_threshold = 10;

	if(argc < 2) {
		usage(argv[0]);
		return 0;
	}
	namedWindow(window_name);
	FILELog::ReportingLevel() = logINFO;
	DetectAndDisplay d(window_name);
	logo_cascade_name = argv[1];

  //-- 1. Load the cascades
	if( !logo_cascade.load( logo_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
	
	//-- Get video
	capture.open( -1 );
	if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
	// capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	// capture.set(CV_CAP_PROP_FRAME_HEIGHT, 320);

	//process images
	while(capture.read(frame)) {
		if( frame.empty() ){
			printf(" --(!) No captured frame -- Break!");
			break;
		}
		Mat im_gray;
		if (frame.channels() > 1) {
			cvtColor(frame, im_gray, CV_BGR2GRAY);
		} else {
			im_gray = frame;
		}
		vector<Rect> logos;
		//-- 3. Apply the classifier to the frame
		d.detectAndMark(frame, logo_cascade, logos);

		//detect and track new objects
		for(uint i= 0; i<logos.size(); i++){ 
			Point2f a(logos[i].x,logos[i].y);
			bool match = true; // stays true when no existing tracker is close, i.e. a new object
			for(uint j = 0; j < cmt_list.size(); j++){
				Point2f b(std::get<0>(cmt_list[j])->bb_rot.center);
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(logos[i].width*logos[i].width + logos[i].height*logos[i].height);
				if(res < sizee){
					std::get<1>(cmt_list[j]) = nb_frame_threshold;
					match = false;
					break;
				} 
			}
			if(match || cmt_list.size() == 0) {
				cmt = new CMT();
				cmt->initialize(im_gray, logos[i]);
				cmt_list.push_back(tuple<CMT*,int, Point>(cmt,nb_frame_threshold,cmt->bb_rot.center));
			}
		}
				
		//drop trackers whose object has not been re-detected for a while
		for(uint i = 0; i<cmt_list.size(); i++){
			Point2f b(std::get<0>(cmt_list[i])->bb_rot.center);
			for(uint j = 0; j<logos.size(); j++) {
				Point2f a(logos[j].x,logos[j].y);
				RotatedRect r = std::get<0>(cmt_list[i])->bb_rot;
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(r.size.width * r.size.width + r.size.height * r.size.height);
				if(res<sizee){
					std::get<1>(cmt_list[i])++;
					break;
				}
			}
			std::get<1>(cmt_list[i])--;
			if(std::get<1>(cmt_list[i]) <= 0) {
				cmt_list.erase(cmt_list.begin()+i);
				--i;       // compensate for the erased element (the loop's ++i undoes the unsigned wrap at i==0)
				continue;  // the erased tracker must not be used below
			}
			for(uint j = 0; j < cmt_list.size() && j!=i; j++){
				Point2f a(std::get<0>(cmt_list[j])->bb_rot.center);
				RotatedRect r = std::get<0>(cmt_list[j])->bb_rot;
				double res = cv::norm(cv::Mat(a),cv::Mat(b));
				double sizee = sqrt(r.size.width * r.size.width + r.size.height * r.size.height);
				if(res<sizee){
					cmt_list.erase(cmt_list.begin()+j);
					break;
				}
			}
		}
		
		d.displayObject(logos, frame);
		for(uint i = 0; i<cmt_list.size(); i++) {
			std::get<0>(cmt_list[i])->processFrame(im_gray);
			char key = display(frame, *(std::get<0>(cmt_list[i])));
			if(key == 'q') break;
		}
		process(frame);

		vector<Point> initials;
		vector<Point> currents;
		for(uint i = 0; i < cmt_list.size(); i++) {
			initials.push_back(std::get<2>(cmt_list[i]));
			currents.push_back(std::get<0>(cmt_list[i])->bb_rot.center);
		}
		
		for(uint i = 0; i < counters.size(); i++) {
			counters[i].setInitials(initials);
			counters[i].setCurrents(currents);
		}
		
		for(uint i = 0; i < counters.size(); i++) {
			tuple<int,int,int, vector<Point> > tmp = counters[i].getSituation();
			putText(frame, to_string(std::get<0>(tmp)) + to_string(std::get<1>(tmp)) + to_string(std::get<2>(tmp)), Point(5,15*i+15), CV_FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255,255,0));
			
			imshow(window_name, frame);
		}
		waitKey(0);
	}
	return EXIT_SUCCESS;
}
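The detection-to-tracker matching test appears twice above; pulled out as a helper it reads more clearly. Same computation as the snippet (distance from the detection's top-left corner to the tracker centre, gated by the box diagonal), just isolated:

bool nearExistingTracker(const Rect& detection, const Point2f& trackerCenter)
{
	Point2f corner((float)detection.x, (float)detection.y);
	double dist = cv::norm(cv::Mat(corner), cv::Mat(trackerCenter));
	double diagonal = sqrt((double)detection.width * detection.width +
	                       (double)detection.height * detection.height);
	return dist < diagonal;  // close enough to be the same object
}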
Ejemplo n.º 28
0
int main(int argc, char *argv[]) {

    if(!isParam("-sl", argc, argv) || !isParam("-ml", argc, argv) || !isParam("-d", argc, argv)) {
        help();
        return 0;
    }

    float squareLength = (float)atof(getParam("-sl", argc, argv).c_str());
    float markerLength = (float)atof(getParam("-ml", argc, argv).c_str());
    int dictionaryId = atoi(getParam("-d", argc, argv).c_str());
    aruco::Dictionary dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

    bool showRejected = false;
    if(isParam("-r", argc, argv)) showRejected = true;

    bool estimatePose = false;
    Mat camMatrix, distCoeffs;
    if(isParam("-c", argc, argv)) {
        bool readOk = readCameraParameters(getParam("-c", argc, argv), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
        estimatePose = true;
    }

    bool autoScale = false;
    float autoScaleFactor = 1.;
    if(isParam("-as", argc, argv)) {
        autoScaleFactor = (float)atof(getParam("-as", argc, argv).c_str());
        autoScale = true;
    }

    aruco::DetectorParameters detectorParams;
    if(isParam("-dp", argc, argv)) {
        bool readOk = readDetectorParameters(getParam("-dp", argc, argv), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }

    VideoCapture inputVideo;
    int waitTime;
    if(isParam("-v", argc, argv)) {
        inputVideo.open(getParam("-v", argc, argv));
        waitTime = 0;
    } else {
        int camId = 0;
        if(isParam("-ci", argc, argv)) camId = atoi(getParam("-ci", argc, argv).c_str());
        inputVideo.open(camId);
        waitTime = 10;
    }

    double totalTime = 0;
    int totalIterations = 0;

    while(inputVideo.grab()) {
        Mat image, imageCopy;
        inputVideo.retrieve(image);

        double tick = (double)getTickCount();

        vector< int > markerIds;
        vector< Vec4i > diamondIds;
        vector< vector< Point2f > > markerCorners, rejectedMarkers, diamondCorners;
        vector< Mat > rvecs, tvecs;

        // detect markers
        aruco::detectMarkers(image, dictionary, markerCorners, markerIds, detectorParams,
                             rejectedMarkers);

        // detect diamonds
        if(markerIds.size() > 0)
            aruco::detectCharucoDiamond(image, markerCorners, markerIds,
                                        squareLength / markerLength, diamondCorners, diamondIds,
                                        camMatrix, distCoeffs);

        // estimate diamond pose
        if(estimatePose && diamondIds.size() > 0) {
            if(!autoScale) {
                aruco::estimatePoseSingleMarkers(diamondCorners, squareLength, camMatrix,
                                                 distCoeffs, rvecs, tvecs);
            } else {
                // if autoscale, extract square size from last diamond id
                for(unsigned int i = 0; i < diamondCorners.size(); i++) {
                    float autoSquareLength = autoScaleFactor * float(diamondIds[i].val[3]);
                    vector< vector< Point2f > > currentCorners;
                    vector< Mat > currentRvec, currentTvec;
                    currentCorners.push_back(diamondCorners[i]);
                    aruco::estimatePoseSingleMarkers(currentCorners, autoSquareLength, camMatrix,
                                                     distCoeffs, currentRvec, currentTvec);
                    rvecs.push_back(currentRvec[0]);
                    tvecs.push_back(currentTvec[0]);
                }
            }
        }


        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        totalIterations++;
        if(totalIterations % 30 == 0) {
            cout << "Detection Time = " << currentTime * 1000 << " ms "
                 << "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
        }


        // draw results
        image.copyTo(imageCopy);
        if(markerIds.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, markerCorners);


        if(showRejected && rejectedMarkers.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, rejectedMarkers, noArray(), Scalar(100, 0, 255));

        if(diamondIds.size() > 0) {
            aruco::drawDetectedDiamonds(imageCopy, diamondCorners, diamondIds);

            if(estimatePose) {
                for(unsigned int i = 0; i < diamondIds.size(); i++)
                    aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
                                    squareLength * 0.5f);
            }
        }

        imshow("out", imageCopy);
        char key = (char)waitKey(waitTime);
        if(key == 27) break;
    }

    return 0;
}
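readCameraParameters above comes from the aruco sample's helper code. Reading the same two matrices directly with cv::FileStorage would look roughly like this; the node names are an assumption based on the usual calibration file layout:

bool readCameraParamsSketch(const std::string& filename, Mat& camMatrix, Mat& distCoeffs)
{
	FileStorage fs(filename, FileStorage::READ);
	if (!fs.isOpened())
		return false;
	fs["camera_matrix"] >> camMatrix;              // 3x3 intrinsics
	fs["distortion_coefficients"] >> distCoeffs;   // distortion vector
	return !camMatrix.empty() && !distCoeffs.empty();
}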
Ejemplo n.º 29
0
///Program extracting pictures of face from video file
int main(int argc,char **argv){

  Mat img,eq;
  char ster='1';
  Mat mid;
  Mat bw;
  Mat facePics;

  Size rozm(OUT_WIDTH,OUT_HEIGHT);
  string adres,label;

  Galleries galleries;

  int m,n;

  vector<Rect> faces; 

  VideoCapture cap;

  //  CascadeClassifier finder;
  int limit=1000;
  int counter=0;

  if(argc<4){
    cerr<<"Error: not enough parameters."<<endl<<argv[0]
	<<" galleries_folder label input_device_number [number_of_photos_to_extract]"<<endl;
    return 1;
  }

  CascadeClassifier  finder("kaskady/haarcascade_frontalface_alt_tree.xml");
  adres=argv[1];
  label=argv[2];
  cap.open(argv[3]);
  if(argc>=5){
    limit=atoi(argv[4]);
  }

  try{
    galleries.setPath(adres);
    galleries.load("gallery.xml");
  }
  catch(Exception ex){
    cerr<<"Exception passed up through "<<__FILE__<<':'<<__LINE__
	<<" in fucntion "<<__func__;
    cerr<<ex.code<<endl<<ex.err<<endl<<ex.func<<endl<<ex.line<<endl;
  }
  while(ster!='q' && counter<limit){
    try{
      cap>>img;
      if(img.data==NULL){
	break;
      }
      img.copyTo(facePics);
      
      cvtColor(img,bw,CV_BGR2GRAY); // VideoCapture frames are BGR
      equalizeHist(bw,eq);
    }

    catch(Exception ex){
      cerr<<"Exception passed up through "<<__FILE__<<':'<<__LINE__
	  <<" in fucntion "<<__func__;
      cerr<<ex.code<<endl<<ex.err<<endl<<ex.func<<endl<<ex.line<<endl;
    }
    
    try{
      
      {
	stringstream sBufor;
	string cel,buff;
	finder.detectMultiScale(eq,faces,1.3);
	if(!faces.empty()){
	  m=floor(sqrt(faces.size()));
	  n=ceil(sqrt(faces.size()));
	  mid.create(rozm.height*m,rozm.width*n,eq.type());
	  int i=0;
	    
	  for(vector<Rect>::iterator it=faces.begin();
	      it!=faces.end();++it,++i){
	    it->y-=(it->height)*FACE_FACTOR/2;
	    it->height*=(1+FACE_FACTOR);
	    //	    cerr<<rozm.width<<" "<<rozm.height<<endl;
	    rectangle(facePics,
		      Point(it->x,it->y),
		      Point(it->x+it->width,it->y+it->height),
		      Scalar(255,0,0));
	    
	    Mat midPt=mid(Rect(rozm.width*(i%n),   // column in the m x n grid
			       rozm.height*(i/n),  // row
			       rozm.width,rozm.height));
	    resize(Mat(eq,(*it)),midPt,midPt.size(),0,0,CV_INTER_LINEAR);
	    if(faces.size()==1){
	      galleries.add(label,midPt);
	      cerr<<++counter<<endl;
	    }
	  }
	}
	if(ster!='q'){
	  ster=waitKey(10);
	}
      }
    }
    catch(Exception ex){
      cerr<<"Exception passed up through "<<__FILE__<<':'<<__LINE__
	  <<" in fucntion "<<__func__<<endl;
      cerr<<ex.code<<endl<<ex.err<<endl<<ex.func<<endl<<ex.line<<endl;
    }
    
  }
   
   
   galleries.save("gallery.xml");
     
  return 0;
}
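The loop above grows each detection upward by FACE_FACTOR without bounds checking, so Mat(eq,(*it)) can throw once a face sits near the top edge of the frame. A clamped version of the same adjustment, as an assumption-based sketch:

Rect growFaceRect(Rect r, double factor, Size imageSize)
{
	r.y -= (int)(r.height * factor / 2);        // extend upward, as the loop does
	r.height = (int)(r.height * (1 + factor));
	return r & Rect(0, 0, imageSize.width, imageSize.height);  // clamp to the image
}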
Ejemplo n.º 30
0
int main(int argc, char** argv)
{
	VideoCapture cap;
	bool update_bg_model = true;

	cap.open(0);
	cap.set(CV_CAP_PROP_FPS, 30);
	Mat tmp_frame, bgmask, out_frame, init, original;
	cap >> tmp_frame;
	cap >> init;

	if (tmp_frame.empty())
	{
		printf("can not read data from the video source\n");
		return -1;
	}

	// finger route
	vector<cv::Point> trace;
	int trace_cnt = 0;

	// Character or Number recognition mode
	// Add space, delete mode
	Rect NumRecogRect(10, 10, 90, 50);
	Rect CharRecogRect(110, 10, 90, 50);
	Rect SpaceRecogRect(210, 10, 90, 50);
	Rect DeleteRecogRect(310, 10, 90, 50);

	int RecogMode = NUM_RECOGNITION_MODE;
	int RecogModeChange_cnt = 0;
	int CAM_WIDTH = init.size().width;
	int CAM_HEIGHT = init.size().height;

	Point NumStringPoint(CAM_WIDTH - NumRecogRect.width + 10, 15 + NumRecogRect.height / 2);
	Point CharStringPoint(CAM_WIDTH - (CharRecogRect.width + NumRecogRect.width) - 5, 15 + CharRecogRect.height / 2);
	Point SpaceStringPoint(CAM_WIDTH - (SpaceRecogRect.width + CharRecogRect.width + NumRecogRect.width) - 17, 15 + SpaceRecogRect.height / 2);
	Point DeleteStringPoint(CAM_WIDTH - (DeleteRecogRect.width + SpaceRecogRect.width + CharRecogRect.width + NumRecogRect.width) - 15, 15 + DeleteRecogRect.height / 2);

	//rect size for get char
	//int sizeofRect = int(CAM_WIDTH/7);
	int sizeofRect = 100;
	int init_y;

	// properties of saved image
	vector<int> compression_params;
	compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
	compression_params.push_back(9);
	int img_cnt = 0;
	vector<String> waiting_output_list;

	int num_frame = 0;
	time_t start, end;
	time(&start);

	char* buf = new char[MAX_STRING];
	int bTop = 0;
	buf[bTop] = 0;
	try {
		system("echo init > .\\output\\999.txt");
		system("DEL .\\output\\*.txt");
		for (;;)
		{
			
			cap >> tmp_frame;
			cap >> original;
			Mat outimg = Mat::zeros(original.size(), original.type());
			if (tmp_frame.empty())
				break;

			//background subtraction
			absdiff(tmp_frame, init, tmp_frame);

			//make binaryMat
			cv::Mat grayscaleMat(tmp_frame.size(), CV_8U);
			cv::cvtColor(tmp_frame, grayscaleMat, CV_BGR2GRAY);
			cv::Mat binaryMat(grayscaleMat.size(), grayscaleMat.type());
			cv::threshold(grayscaleMat, binaryMat, 30, 255, cv::THRESH_BINARY);

			erode(binaryMat, binaryMat, Mat(), Point(-1, -1), 2, 1, 1);
			dilate(binaryMat, binaryMat, Mat(), Point(-1, -1), 5, 1, 1);
			erode(binaryMat, binaryMat, Mat(), Point(-1, -1), 2, 1, 1);

			cv::Mat temp(binaryMat.size(), binaryMat.type());
			binaryMat.copyTo(temp);

			//making mask
			Rect boundingRect1(0, 0, temp.cols, temp.rows * 5 / 6);
			Mat mask1 = Mat::zeros(temp.size(), temp.type());
			rectangle(mask1, boundingRect1, CV_RGB(255, 255, 255), -1);
			temp = temp & mask1;

			//contour
			vector<vector<cv::Point>> contours;
			findContours(temp, contours, cv::noArray(), cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

			int maxK = 0;
			if (contours.size() == 0)
				continue;

			double maxArea = contourArea(contours[0]);
			for (int k = 0; k < contours.size(); k++){
				double area = contourArea(contours[k]);
				if (area > maxArea){
					maxK = k;
					maxArea = area;
				}
			}
			// convex hull
			vector<int> hull;
			vector<cv::Point> handContour = contours[maxK];
			cv::convexHull(handContour, hull);

			vector<cv::Point> ptsHull;
			int min = temp.rows;
			int min_idx = 0;
			for (int k = 0; k < hull.size(); k++){
				int i = hull[k];
				if (min > handContour[i].y){
					min_idx = i;
					min = handContour[i].y;
				}
				ptsHull.push_back(handContour[i]);
			}
			circle(original, handContour[min_idx], 6, cv::Scalar(0, 100, 255), -1);
			drawContours(original, vector<vector<cv::Point>>(1, ptsHull), 0, cv::Scalar(255, 0, 0), 2);
			for (int k = 0; k < contours.size(); k++){
				cv::Point2f center;
				cv::Moments M = moments(contours[k]);
				center.x = M.m10 / M.m00;
				center.y = M.m01 / M.m00;
			}


			//making mask
			Point2f center;
			float radius;
			minEnclosingCircle(handContour, center, radius);
			line(original, center, handContour[min_idx], Scalar(0, 100, 255), 5);

			//get the circle's bounding rect

			//	Rect boundingRect(center.x - radius, center.y - radius, radius * 2, radius * 2);
			Mat mask = Mat::zeros(binaryMat.size(), binaryMat.type());
			circle(mask, center, int(radius * 5 / 6), CV_RGB(255, 255, 255), 10);
			//	rectangle(mask, boundingRect, CV_RGB(255, 255, 255), -1);
			//	rectangle(mask1, boundingRect1, CV_RGB(255, 255, 255), -1);

			//check number of finger
			Mat img_parted(binaryMat.size(), binaryMat.type());
			img_parted = binaryMat & mask;
			Mat label;
			int num_label = connectedComponents(img_parted, label) - 1;
			if (num_label == 2) {
				trace.push_back(handContour[min_idx]);
				if (trace.size() == 1) 
					init_y = trace[0].y + sizeofRect * 2 / 7; // multiply first: 2/7 alone truncates to 0
				trace_cnt = 0;
			}
			else {
				trace_cnt++;
				if (trace_cnt >= 7) 
					trace.clear();
				
			}

			imshow("bin", binaryMat);

			int idx = 0;
			int startFrame = 0;
			for (int i = 1; i < trace.size(); i++)
			{
				int init_x = trace[0].x - idx*sizeofRect + sizeofRect * 2 / 7;
				//init_y = trace[0].y + 2 / 7 * sizeofRect;
				line(original, trace[i - 1], trace[i], Scalar(255, 100, 0), 5);
				line(outimg, trace[i - 1], trace[i], Scalar(255, 255, 255), 5);
				rectangle(original, Rect(init_x - sizeofRect, init_y, sizeofRect, sizeofRect), CV_RGB(255, 0, 0), 3);
				if (trace[i].x < (init_x - sizeofRect)){
					//send data from startFrame to the current frame (==i)
					//in delete mode, idx counts how many characters will be deleted
					idx++;
					img_cnt++;
					startFrame = i;
					cv::Mat subImg = outimg(cv::Rect(init_x - sizeofRect, init_y, sizeofRect, sizeofRect));
					flip(subImg, subImg, 1);
					bitwise_not(subImg, subImg);
					String img_name = to_string(img_cnt) + ".png";
					String output_name = to_string(img_cnt);
					waiting_output_list.push_back(output_name);
					

					imwrite("./images/" + img_name, subImg, compression_params);
					//imshow("./images/" + to_string(idx + img_cnt * 4) + ".png", subImg);
				
					trace.erase(trace.begin(), trace.begin() + i);
					
					std::string lang;
					if (RecogMode == CHAR_RECOGNITION_MODE)
						lang = "fingkey";
					else if (RecogMode == NUM_RECOGNITION_MODE)
						lang = "num";

					std::string t = ".\\tesseract-ocr\\tesseract.exe .\\images\\" + img_name + " .\\output\\" + output_name+" -l "+ lang +" -psm 10";
					WinExec(LPCSTR(t.c_str()), SW_HIDE);
					
					break;
				}
			}

			//Mode select
			Mat overlay(original.size(), original.type());
			original.copyTo(overlay);

			if (checkPointInRect(CharRecogRect, handContour[min_idx])){
				RecogModeChange_cnt++;
				if (RecogModeChange_cnt == 7) {
					RecogMode = CHAR_RECOGNITION_MODE;
					RecogModeChange_cnt = 0;
					trace.clear();
			//		printf("CHARACTER RECOGNITION MODE!\n");
				}
			}
			else if (checkPointInRect(NumRecogRect, handContour[min_idx])){
				RecogModeChange_cnt++;
				if (RecogModeChange_cnt == 7) {
					RecogMode = NUM_RECOGNITION_MODE;
					RecogModeChange_cnt = 0;
					trace.clear();
			//		printf("NUMBER RECOGNITION MODE!\n");
				}
			}
			else if (checkPointInRect(SpaceRecogRect, handContour[min_idx])){
				RecogModeChange_cnt++;
				if (RecogModeChange_cnt == 7) {
					RecogMode = SPACE_RECOGNITION_MODE;
					RecogModeChange_cnt = 0;
					trace.clear();
			//		printf("SPACE RECOGNITION MODE!\n");
				}
			}
			else if (checkPointInRect(DeleteRecogRect, handContour[min_idx])){
				RecogModeChange_cnt++;
				if (RecogModeChange_cnt == 7) {
					RecogMode = DELETE_RECOGNITION_MODE;
					RecogModeChange_cnt = 0;
					trace.clear();
			//		printf("DELETE RECOGNITION MODE!\n");
				}
			}
			else
				RecogModeChange_cnt = 0;


			if (RecogMode == CHAR_RECOGNITION_MODE) {
				rectangle(overlay, CharRecogRect, CV_RGB(50, 205, 50), -1);
				rectangle(overlay, NumRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, SpaceRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, DeleteRecogRect, CV_RGB(128, 128, 128), -1);
			}
			else if (RecogMode == NUM_RECOGNITION_MODE){
				rectangle(overlay, CharRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, NumRecogRect, CV_RGB(50, 205, 50), -1);
				rectangle(overlay, SpaceRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, DeleteRecogRect, CV_RGB(128, 128, 128), -1);
			}
			else if (RecogMode == SPACE_RECOGNITION_MODE){
				rectangle(overlay, CharRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, NumRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, SpaceRecogRect, CV_RGB(50, 205, 50), -1);
				rectangle(overlay, DeleteRecogRect, CV_RGB(128, 128, 128), -1);
			}
			else if (RecogMode == DELETE_RECOGNITION_MODE){
				rectangle(overlay, CharRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, NumRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, SpaceRecogRect, CV_RGB(128, 128, 128), -1);
				rectangle(overlay, DeleteRecogRect, CV_RGB(50, 205, 50), -1);
			}

			addWeighted(overlay, 0.5, original, 1 - 0.5, 0, original);

			flip(original, original, 1);
			flip(img_parted, img_parted, 1);

			putText(original, "NUM", NumStringPoint, 2, 0.7, Scalar::all(255));
			putText(original, "CHAR", CharStringPoint, 2, 0.7, Scalar::all(255));
			putText(original, "DEL", DeleteStringPoint, 2, 0.7, Scalar::all(255));
			putText(original, "SPACE", SpaceStringPoint, 2, 0.7, Scalar::all(255));


			/*
			*	Frame
			*/
			time(&end);
			num_frame++;
			std::ostringstream strs;
			strs << num_frame / difftime(end, start);
			std::string fpsString = strs.str();
			putText(original, fpsString, Point(10, 30), 2, 0.7, Scalar::all(255));

			imshow("Original", original);

			char cTemp = 0;
			if (waiting_output_list.size() > 0) {
				FILE* fp;
				String output_dir = ".\\output\\" + waiting_output_list[0]+".txt";
				system("cls");
				if (fopen_s(&fp, output_dir.c_str(), "r") == 0) {
					//fscanf(fp, " %c", &buf);
					printf("# %s\n", mode_t[RecogMode % 10]);
					if (RecogMode == CHAR_RECOGNITION_MODE || RecogMode == NUM_RECOGNITION_MODE){
						cTemp = fgetc(fp);
						
					}
					else if (RecogMode == SPACE_RECOGNITION_MODE){
						cTemp = ' ';
					}
					else if (RecogMode == DELETE_RECOGNITION_MODE){

					}
					printf("curruntly entered\t : %c\n", cTemp);
					buf[bTop++] = cTemp;
					buf[bTop] = NULL;
					printf("Entered String \t\t: %s\n", buf);

					waiting_output_list.erase(waiting_output_list.begin());
				}
				else {
					printf("Recognition Fail!\n");
				}
			}
			
			
			int keycode = waitKey(30);
			if (keycode == 27)
				break;
		}
	}
	catch (exception& e) {
		cout << e.what() << endl;
	}

	return 0;
}
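The finger-counting trick above intersects the binary hand mask with a thick ring around the hand's enclosing circle and counts the resulting blobs with connectedComponents; two blobs is taken to mean the wrist plus one raised finger crossing the ring. The same steps isolated as a helper, assumption-based:

int countRingCrossings(const Mat& binaryHand, Point2f center, float radius)
{
	// draw a thick ring just inside the hand's enclosing circle
	Mat ring = Mat::zeros(binaryHand.size(), binaryHand.type());
	circle(ring, center, (int)(radius * 5 / 6), Scalar(255), 10);
	// blobs where the hand crosses the ring (wrist, extended fingers, ...)
	Mat crossings = binaryHand & ring;
	Mat labels;
	return connectedComponents(crossings, labels) - 1;  // minus the background label
}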