Code example #1
void App::run()
{
    running = true;
    cv::VideoWriter video_writer;

    Size win_stride(args.win_stride_width, args.win_stride_height);
    Size win_size(args.win_width, args.win_width * 2);
    Size block_size(args.block_width, args.block_width);
    Size block_stride(args.block_stride_width, args.block_stride_height);
    Size cell_size(args.cell_width, args.cell_width);

    cv::Ptr<cv::cuda::HOG> gpu_hog = cv::cuda::HOG::create(win_size, block_size, block_stride, cell_size, args.nbins);
    cv::HOGDescriptor cpu_hog(win_size, block_size, block_stride, cell_size, args.nbins);

    if(args.svm_load) {
//        std::vector<float> svm_model;
//        const std::string model_file_name = args.svm;
//        FileStorage ifs(model_file_name, FileStorage::READ);
//        if (ifs.isOpened()) {
//            ifs["svm_detector"] >> svm_model;
//        } else {
//            const std::string what =
//                    "could not load model for hog classifier from file: "
//                    + model_file_name;
//            throw std::runtime_error(what);
//        }

//        // check if the variables are initialized
//        if (svm_model.empty()) {
//            const std::string what =
//                    "HoG classifier: svm model could not be loaded from file"
//                    + model_file_name;
//            throw std::runtime_error(what);
//        }

        Ptr<SVM> svm;
        // Load the trained SVM.
        svm = StatModel::load<SVM>( args.svm);
        // Set the trained svm to my_hog
        vector< float > hog_detector;
        get_svm_detector( svm, hog_detector );

        gpu_hog->setSVMDetector(hog_detector);
        cpu_hog.setSVMDetector(hog_detector);
    } else {
        // Create HOG descriptors and detectors here
        Mat detector = gpu_hog->getDefaultPeopleDetector();

        gpu_hog->setSVMDetector(detector);
        cpu_hog.setSVMDetector(detector);
    }

    cout << "gpusvmDescriptorSize : " << gpu_hog->getDescriptorSize()
         << endl;
    cout << "cpusvmDescriptorSize : " << cpu_hog.getDescriptorSize()
         << endl;

    while (running)
    {
        VideoCapture vc;
        Mat frame;
        vector<String> filenames;

        unsigned int count = 1;

        if (args.src_is_video)
        {
            vc.open(args.src.c_str());
            if (!vc.isOpened())
                throw runtime_error(string("can't open video file: " + args.src));
            vc >> frame;
        }
        else if (args.src_is_folder) {
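
For reference, a minimal self-contained sketch of the same people-detection pattern (CPU path only, default 64x128 people detector; OpenCV 3.x API assumed, without this sample's argument handling):

#include <opencv2/opencv.hpp>

int main(int argc, char** argv)
{
    if (argc < 2) return -1;

    // HOG with the default people detector shipped with OpenCV.
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    cv::VideoCapture vc(argv[1]);
    if (!vc.isOpened()) return -1;

    cv::Mat frame;
    while (vc.read(frame))
    {
        std::vector<cv::Rect> found;
        hog.detectMultiScale(frame, found, 0, cv::Size(8, 8));
        for (size_t i = 0; i < found.size(); i++)
            cv::rectangle(frame, found[i], cv::Scalar(0, 255, 0), 2);
        cv::imshow("people detector", frame);
        if (cv::waitKey(1) == 27) break;  // Esc to quit
    }
    return 0;
}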
Code example #2
int main()
{
    SurfFeatureDetector detector(1000);
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
    Ptr<DescriptorExtractor> extractor = new SurfDescriptorExtractor();
    BOWImgDescriptorExtractor bowide( extractor, matcher );
    
    // load vocabulary data
    Mat vocabulary;
    FileStorage fs( "vocabulary.xml", FileStorage::READ);
    fs["vocabulary"] >> vocabulary;
    fs.release();
    if( vocabulary.empty()  ) return 1;
    
    bowide.setVocabulary( vocabulary );
    
    CvSVM svm_left; svm_left.load("learned_lib_left.xml");
    CvSVM svm_left_mid; svm_left_mid.load("learned_lib_left_mid.xml");
    CvSVM svm_mid; svm_mid.load("learned_lib_mid.xml");
    CvSVM svm_right_mid; svm_right_mid.load("learned_lib_right_mid.xml");
    CvSVM svm_right; svm_right.load("learned_lib_right.xml");
    
    chdir("/home/netbook/Desktop/Capture_Mid");
    // Initialize capturing live feed from the camera device 1
    VideoCapture capture = VideoCapture(1);
    // Couldn't get a device? Throw an error and quit
    if(!capture.isOpened())
    {
        printf("Could not initialize capturing...");
        return -1;
    }
    int n = 0;
    // An infinite loop
    while(true)
    {
        // Will hold a frame captured from the camera
        Mat frame, response_hist;
        
        // If we couldn't grab a frame... quit
        if(!capture.read(frame))
            break;
        imshow("video", frame);
        vector<KeyPoint> keypoints;
        detector.detect(frame,keypoints);
        bowide.compute(frame, keypoints, response_hist);
        
        vector<double> region_list;
        region_list.push_back(svm_left.predict(response_hist,true));
        region_list.push_back(svm_left_mid.predict(response_hist,true));
        region_list.push_back(svm_mid.predict(response_hist,true));
        region_list.push_back(svm_right_mid.predict(response_hist,true));
        region_list.push_back(svm_right.predict(response_hist,true));
        
        long int min_index = std::min_element(region_list.begin(), region_list.end()) - region_list.begin();
        
        if(min_index != 2){
            const string file = to_string(n) + ".jpg";
            imwrite( file, frame );
            n++;
        }
        
        // Wait for a keypress
        int c = cvWaitKey(10);
        if(c!=-1)
        {
            // If pressed, break out of the loop
            break;
        }
    }
    
    return 0;
}
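
CvSVM and cvWaitKey above come from the old OpenCV 1.x/2.x API. In OpenCV 3+ the equivalent load-and-predict step looks roughly like this (a sketch; CvSVM::predict(hist, true) returned the decision-function value, which the RAW_OUTPUT flag reproduces):

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/ml.hpp>

using cv::ml::SVM;
using cv::ml::StatModel;

int main()
{
    // Load a trained SVM saved via FileStorage (XML/YAML).
    cv::Ptr<SVM> svm = StatModel::load<SVM>("learned_lib_left.xml");
    if (svm.empty()) return 1;

    // Stand-in for the BOW response histogram computed per frame
    // (one row, CV_32F, vocabulary-sized).
    cv::Mat response_hist = cv::Mat::zeros(1, 64, CV_32F);

    // RAW_OUTPUT returns the signed distance to the margin
    // instead of the class label.
    float dist = svm->predict(response_hist, cv::noArray(), StatModel::RAW_OUTPUT);
    std::cout << "decision value: " << dist << std::endl;
    return 0;
}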
Code example #3
int main(int argc, char *argv[])
{
      if(argc <2)
      {
         return -1;
      }
	cap.open(argv[1]);
	if(!cap.isOpened())
	{
		cout<<"can't open the video!"<<endl;
		return 0;
	}
	/* Get the total number of video frames */
	frameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);

	int flag = 0;
	/* String buffer for one line of the file */
	char buf[100];
	ifstream in("test.txt");
	int totalnum = 0;

	while (in.getline(buf,100))
	{
		string str = buf;
		vector<int> myint;
		/* Extract the numbers from this line */
		str2num(str,myint);
		/* First line: get the width, height and ratios */
		if(0==totalnum)
		{
			fishCorrect.dstWidth = myint[0];
			fishCorrect.dstHeight = myint[1];
			fishCorrect.wRatio = myint[2];
			fishCorrect.hRatio = myint[3];
		}
		/* Second line: parameters of the first unwrapped view: azimuth, elevation, field of view */
		else if(1==totalnum)
		{
			fishCorrect.phi0 = myint[0];
			fishCorrect.sita0 = myint[1];
			fishCorrect.gama0 = myint[2];
		}
		/* Third line: parameters of the second unwrapped view: azimuth, elevation, field of view */
		else if(2==totalnum)
		{
			fishCorrect.phi1 = myint[0];
			fishCorrect.sita1 = myint[1];
			fishCorrect.gama1 = myint[2];
		}
		/* Fourth line: parameters of the third unwrapped view: azimuth, elevation, field of view */
		else if(3==totalnum)
		{
			fishCorrect.phi2 = myint[0];
			fishCorrect.sita2 = myint[1];
			fishCorrect.gama2 = myint[2];
		}
		/* Fifth line: parameters of the fourth unwrapped view: azimuth, elevation, field of view */
		else if(4==totalnum)
		{
			fishCorrect.phi3 = myint[0];
			fishCorrect.sita3 = myint[1];
			fishCorrect.gama3 = myint[2];
		}	
		totalnum++;
	}
	cout<<"playing...."<<endl;
	/* Main loop: process the video */
	int wflag = 0;
	int num = 0;

	fishCorrect.dstImg = Mat::zeros(Size(fishCorrect.dstWidth*2,fishCorrect.dstHeight*2),CV_8UC3);
	fishCorrect.position = (int*)malloc(fishCorrect.dstWidth*fishCorrect.dstHeight*8*sizeof(int));

	for(int i = 0;i < frameNumber;++i)
	{
		pthread_t tid1,tid2;
		void *tret;
		if(i <= 21)
		{
			cap >> frame;
		}
		if(i > 20)
		{
			if (flag == 0)
			{
				/* Get the fisheye image region */
				fishCorrect.GetArea(frame);
				flag = 1;
			}
			if (i > 21)
			{
				/* Create thread 2 */
				if (pthread_create(&tid2,NULL,thrd_func2,NULL)!=0)
				{
					printf("Create thread 2 error!\n");
					exit(1);
				}
				/* Wait for thread 1 to finish */
				if (pthread_join(tid1,&tret)!=0)
				{
					printf("Join thread 1 error!\n");
					exit(1);
				}
				// Show the results
				if (wflag == 0)
				{
					namedWindow("Video0",1);
					namedWindow("Video1",1);
					wflag = 1;
				}
				// clock_t t1 = clock();
				imshow("Video0",fishCorrect.orgImg);
				imshow("Video1",fishCorrect.dstImg);
				// clock_t t2 = clock();
				// cout<<(t2-t1)/1000.0<<endl;
				/* Wait for thread 2 to finish */
				if (pthread_join(tid2,&tret)!=0)
				{
					printf("Join thread 2 error!\n");
					exit(1);
				}
				waitKey(1);
			}
			/* If there is no more video data, break out of the loop */
			if (!frame.data)
			{
				break;
			}
			/* Extract the fisheye region and assign it to orgImg */
			fishCorrect.orgImg = frame(fishCorrect.correctArea);

			if (i == 21)
			{
				/* Compute the required parameters */
				fishCorrect.ParamFix();
				fishCorrect.CalculateParam(fishCorrect.orgImg.cols,fishCorrect.orgImg.rows);
				fishCorrect.CalcPositon(fishCorrect.orgImg);
			}
			if(i < frameNumber-1)
			{
				/* Create thread 1 */
				if (pthread_create(&tid1,NULL,thrd_func1,NULL)!=0)
				{
					printf("Create thread 1 error!\n");
					exit(1);
				}
			}
		}
	}
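
The loop above overlaps two pthread workers: thrd_func1 prepares a frame while thrd_func2 consumes the previous result. A stripped-down sketch of that create/join pipeline, with hypothetical no-op workers:

#include <pthread.h>
#include <cstdio>
#include <cstdlib>

static void* thrd_func1(void*) { /* produce: e.g. correct one frame */ return NULL; }
static void* thrd_func2(void*) { /* consume: e.g. encode or display it */ return NULL; }

int main()
{
    const int frameNumber = 100;   // stand-in for CV_CAP_PROP_FRAME_COUNT
    pthread_t tid1 = 0, tid2 = 0;
    bool producerRunning = false;

    for (int i = 0; i < frameNumber; ++i)
    {
        if (producerRunning)
        {
            // Start the consumer for the previous frame...
            if (pthread_create(&tid2, NULL, thrd_func2, NULL) != 0) { fprintf(stderr, "create failed\n"); exit(1); }
            // ...then wait for its producer, and for the consumer itself.
            if (pthread_join(tid1, NULL) != 0) { fprintf(stderr, "join failed\n"); exit(1); }
            if (pthread_join(tid2, NULL) != 0) { fprintf(stderr, "join failed\n"); exit(1); }
        }
        if (i < frameNumber - 1)
        {
            if (pthread_create(&tid1, NULL, thrd_func1, NULL) != 0) { fprintf(stderr, "create failed\n"); exit(1); }
            producerRunning = true;
        }
    }
    return 0;
}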
Code example #4
File: main.cpp  Project: arthurmoreno/Desafio-Intel
void Produtor1 ()
{

    /* OpenCV variables */
    VideoCapture cap;
    Mat gray, frame;

    int i, j, d=D, ss, key=0;
    long k=0L, l, ns=2L*(long)(0.5*FS*T), m=ns/N,sso=HIFI?0L:128L, ssm=HIFI?32768L:128L;
    double **A, a, t, dt=1.0/FS, *w, *phi0, s, y, yp, z, tau1, tau2, x, theta,
                      scale=0.5/sqrt((double)M), q, q2, r, sl, sr, tl, tr, yl, ypl, yr, ypr,
                      zl, zr, hrtf, hrtfl, hrtfr, v=340.0,  /* v = speed of sound (m/s) */
                                                  hs=0.20;  /* hs = characteristic acoustical size of head (m) */

    w    = C_ALLOC(M, double);
    phi0 = C_ALLOC(M, double);
    A    = C_ALLOC(M, double *);

    for (i=0; i<M; i++) A[i] = C_ALLOC(N, double);  /* M x N pixel matrix */

    /* Set lin|exp (0|1) frequency distribution and random initial phase */
    if (d)
        for (i=0; i<M; i++)
            w[i] = TwoPi * FL * pow(1.0* FH/FL,1.0*i/(M-1));
    else
        for (i=0; i<M; i++)
            w[i] = TwoPi * FL + TwoPi * (FH-FL)   *i/(M-1) ;

    for (i=0; i<M; i++) phi0[i] = TwoPi * rnd();

    int cam_id = 0;  /* First available OpenCV camera */
    /* Optionally override ID from command line parameter: prog.exe cam_id */

    cap.open(cam_id);
    if (!cap.isOpened())
    {
        // printf("resenha");
        fprintf(stderr,"Could not open camera %d\n", cam_id);
        exit(1);
    }
    printf("camera opened\n");
    /* Setting standard capture size, may fail; resize later */

    cap.read(frame);  /* Dummy read needed with some devices */
    //cap.set(CV_CAP_PROP_FRAME_WIDTH , 176);
    //cap.set(CV_CAP_PROP_FRAME_HEIGHT, 144);
    cap.set(CV_CAP_PROP_FRAME_WIDTH , 128);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 128);

    if (VIEW)    /* Screen views only for debugging */
    {
        namedWindow("Large", CV_WINDOW_AUTOSIZE);
        namedWindow("Small", CV_WINDOW_AUTOSIZE);
    }
    int count = 0;
    bool bSuccess;
    while (key != 27)    /* Escape key */
    {
        bSuccess = true;
        /* Flush a few buffered frames so the converted frame is recent */
        for (int i=0; i<5 && bSuccess; i++)
            bSuccess = cap.read(frame);

        cap.read(frame);

        if (frame.empty())
        {
            /* Sometimes initial frames fail */
            fprintf(stderr, "Capture failed\n");
            key = waitKey((int)(100));
            continue;
        }
        printf("captured frame\n");

        Mat tmp;
        cvtColor(frame,tmp,CV_BGR2GRAY);
        if (frame.rows != M || frame.cols != N)
            resize(tmp, gray, Size(N,M));
        else
            gray = tmp;

        if (VIEW)    /* Screen views only for debugging */
        {
            /* imwrite("hificodeLarge.jpg", frame); */
            imshow("Large", frame);
            /* imwrite("hificodeSmall.jpg", gray); */
            imshow("Small", gray);
        }

        key = waitKey((int)(10));

        if (CAM)    /* Set live camera image */
        {
            for (i=0; i<M; i++)
            {
                for (j=0; j<N; j++)
                {
                    int mVal=gray.at<uchar>(M-1-i,j)/16;

                    if (mVal == 0)
                        A[i][j]=0;
                    else
                        A[i][j]=pow(10.0,(mVal-15)/10.0);  /* 2dB steps */
                }
            }
        }



        /* Write 8/16-bit mono/stereo .wav file */
        fp = fopen(FNAME,"wb");
        fprintf(fp,"RIFF");         //chunkID
        wl(ns*HIST+36L);            //chunkSize
        fprintf(fp,"WAVEfmt ");     //format + subChunk1ID
        wl(16L);                    //subChunk1Size
        wi(1);                      //audioFormat  1 = PCM
        wi(STEREO?2:1);             //numChannels
        wl(0L+FS);                  //sampleRate
        wl(0L+FS*HIST);             //byteRate
        wi(HIST);                   //blockAlign
        wi(HIFI?16:8);              //bitsPerSample
        fprintf(fp,"data");         //subChunk2ID
        wl(ns*HIST);                //subChunk2Size

        //printf("arquivo setado\n");

        tau1 = 0.5 / w[M-1];
        tau2 = 0.25 * tau1*tau1;

        y = yl = yr = z = zl = zr = 0.0;

        /* Not optimized for speed */
        while (k < ns && !STEREO)
        {
            j = k / m;
            if (j>N-1)
                j=N-1;

            s = 0.0;
            t = k * dt;

            if (k < ns/(5*N))
            {
                s = (2.0*rnd()-1.0) / scale;  /* "click" */
            }
            else
            {
                for (i=0; i<M; i++)
                {
                    a = A[i][j];  /* Rectangular time window */
                    s += a * sin(w[i] * t + phi0[i]);
                }
            }

            yp = y;
            y = tau1/dt + tau2/(dt*dt);
            y  = (s + y * yp + tau2/dt * z) / (1.0 + y);
            z = (y - yp) / dt;
            l  = sso + 0.5 + scale * ssm * y; /* y = 2nd order filtered s */
            if (l >= sso-1+ssm) l = sso-1+ssm;
            if (l < sso-ssm) l = sso-ssm;
            ss = (unsigned int) l;

            if (HIFI)
                wi(ss);
            else
                putc(ss,fp);


            k++;
        }


        fclose(fp);

        printf("a\n");
        PlaySound(TEXT("hificode.wav"), NULL, SND_ASYNC );  /* Play the soundscape */
        printf("b\n");

        k=0;  /* Reset sample count */
    }


}
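
The wl/wi helpers used for the RIFF header are not shown in this excerpt; they presumably write 32-bit and 16-bit little-endian integers to the global FILE* fp. A sketch of such helpers under that assumption:

#include <cstdio>

extern FILE *fp;   /* the same global the example writes the .wav through */

/* Write a 16-bit little-endian integer. */
static void wi(int i)
{
    putc(i & 0xFF, fp);
    putc((i >> 8) & 0xFF, fp);
}

/* Write a 32-bit little-endian integer. */
static void wl(long l)
{
    wi((int)(l & 0xFFFFL));
    wi((int)((l >> 16) & 0xFFFFL));
}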
Code example #5
File: OpticalFlow.cpp  Project: 4m1g0/openCV-tutorial
int main(int argc, char* argv[])
{

	VideoCapture capture;

	// Objects
	Mat frame;

	// keyboard pressed
	char keypressed = 0;
	bool success;

	// Load image from disk
	capture.open(0);
	// if not success, exit program
	if (!capture.isOpened()){
		cout << "error in VideoCapture: check path file" << endl;
		getchar();
		return 1;
	}

	/// Parameters for Shi-Tomasi algorithm
	vector<Point2f> cornersA, cornersB;
	double qualityLevel = 0.01;
	double minDistance = 10;
	int blockSize = 3;
	bool useHarrisDetector = false; 
	double k = 0.04;
	int maxCorners = MAX_CORNERS;

	// winsize has to be 11 or 13, otherwise nothing is found
	vector<uchar> status;
	vector<float> error;
	int winsize = 11;
	int maxlvl = 5;

	// Objects
	Mat img_prev, img_next, grayA, grayB;

	success = capture.read(frame);
	// if no success exit program
	if (success == false){
		cout << "Cannot read the frame from file" << endl;
		getchar();
		return 1;
	}

	img_prev = frame.clone();

	// Windows for all the images
	namedWindow("Corners A", CV_WINDOW_AUTOSIZE);
	namedWindow("Corners B", CV_WINDOW_AUTOSIZE);
    
    VideoWriter outputVideo;
    Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH),    // Acquire input size
                  (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));
    int ex = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));  
    outputVideo.open("video.avi", CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), S, true);
    
    if (!outputVideo.isOpened())
    {
        cout  << "Could not open the output video for write: "  << endl;
        return -1;
    }
    
	while (keypressed != ESCAPE)
	{
		// read frame by frame in a loop
		success = capture.read(frame);
		// if no success exit program
		if (success == false){
			cout << "Cannot read the frame from file" << endl;
			return 1;
		}

		img_next = frame.clone();

		// convert to grayscale (camera frames are BGR, not RGB)
		cvtColor(img_prev, grayA, CV_BGR2GRAY);
		cvtColor(img_next, grayB, CV_BGR2GRAY);

		/// Apply corner detection
		goodFeaturesToTrack(grayA,
			cornersA,
			maxCorners,
			qualityLevel,
			minDistance,
			Mat(),
			blockSize,
			useHarrisDetector,
			k);

		calcOpticalFlowPyrLK(grayA, grayB, cornersA, cornersB, status, error,
			Size(winsize, winsize), maxlvl);

		/// Draw corners detected
		//cout << "Number of cornersA detected: " << cornersA.size() << endl;
		//cout << "Optical Flow corners detected: " << cornersB.size() << endl;
		for (int i = 0; i < cornersA.size(); i++)
		{
			line(img_prev, cornersA[i], cornersB[i], Scalar(0, 255, 0), 2);
		}

		// Show image in the name of the window
		imshow("Corners A", img_prev);
		imshow("Corners B", img_next);
        outputVideo << img_prev;
        
		// Function for show the image in ms.
		keypressed = waitKey(1);
		img_prev = img_next;
		

	}
	// Free memory
	img_prev.release();
	img_next.release();
	grayA.release();
	grayB.release();
	destroyAllWindows();
	// End of the program
	return 0;
}
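
One detail the loop above glosses over: calcOpticalFlowPyrLK sets status[i] to 0 when the flow for point i was not found, so drawing every cornersA/cornersB pair can produce spurious lines. A small helper that draws only the successfully tracked pairs (a sketch, not part of the original tutorial):

#include <vector>
#include <opencv2/opencv.hpp>

// Draw flow lines only for points whose LK status flag is set.
static void drawTrackedFlow(cv::Mat& img,
                            const std::vector<cv::Point2f>& cornersA,
                            const std::vector<cv::Point2f>& cornersB,
                            const std::vector<uchar>& status)
{
    for (size_t i = 0; i < status.size(); i++)
        if (status[i])
            cv::line(img, cornersA[i], cornersB[i], cv::Scalar(0, 255, 0), 2);
}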
Code example #6
//this is a sample for foreground detection functions
int main(int argc, const char** argv)
{
    help();

    CommandLineParser parser(argc, argv, keys);
    bool useCamera = parser.has("camera");
    bool smoothMask = parser.has("smooth");
    string file = parser.get<string>("file_name");
    string method = parser.get<string>("method");
    VideoCapture cap;
    bool update_bg_model = true;

    if( useCamera )
        cap.open(0);
    else
        cap.open(file.c_str());

    parser.printMessage();

    if( !cap.isOpened() )
    {
        printf("can not open camera or video file\n");
        return -1;
    }



     /// Set background subtractor object 
     Ptr<BackgroundSubtractor> bg_model = method == "knn" ?
            createBackgroundSubtractorKNN().dynamicCast<BackgroundSubtractor>() :
            createBackgroundSubtractorMOG2().dynamicCast<BackgroundSubtractor>();

    

    /// Set VideoWriter object
    Size frameSize = Size((int) cap.get(CAP_PROP_FRAME_WIDTH), (int) cap.get(CAP_PROP_FRAME_HEIGHT));
    VideoWriter put("./backgroundSubtraction.mpg", VideoWriter::fourcc('M','P','E','G'), 30, frameSize); // works

	if(!put.isOpened())
	{
		cout << "File could not be created for writing. Check permissions" << endl;
		return -1;
	}

 
   Mat img0, img, fgmask, fgimg, outputFrame;

    for(;;)
    {
        cap >> img0;

        if( img0.empty() )
            break;

        /// Halve width and height (1/4 of the original area).
        /// Note: INTER_LINEAR must go in the interpolation slot, not fx.
        resize(img0, img, Size(frameSize.width/2, frameSize.height/2), 0, 0, INTER_LINEAR);

        if( fgimg.empty() )
          fgimg.create(img.size(), img.type());

        /// Update the background model
        bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
        if( smoothMask )
        {
            GaussianBlur(fgmask, fgmask, Size(25, 25), 3.5, 0);
            threshold(fgmask, fgmask, 60, 255, THRESH_BINARY);
        }

        /// Get foreground image
        fgimg = Scalar::all(0);
        img.copyTo(fgimg, fgmask);

        /// Get background image
        Mat bgimg;
        bg_model->getBackgroundImage(bgimg);

     
        stringstream text1, text2, text3, text4;
        text1 << "Raw Video";
        putText(img, text1.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0)); 
        text2 << "Mask";
        putText(fgmask, text2.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(255,255,255));
        text3 << "Foreground";
        putText(fgimg, text3.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0));
        text4 << "Background";
        putText(bgimg, text4.str(), cv::Point(10, img.size().height - 10), FONT_HERSHEY_PLAIN, 1.5, Scalar(0,255,0));
           
        /// Display output camera frames        
         outputFrame = append4( img,
                               fgmask,
                               fgimg,
                               bgimg); 
        
         imshow(windowName, outputFrame);  
         put << outputFrame;   
       
       

        char k = (char)waitKey(30);
        if( k == 27 ) break;
        if( k == ' ' )
        {
            update_bg_model = !update_bg_model;
            if(update_bg_model)
                printf("Background update is on\n");
            else
                printf("Background update is off\n");
        }
    }

    return 0;
}
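
append4 is defined elsewhere in that project; judging by its use it tiles the four half-size frames into one frameSize mosaic. A plausible implementation (hypothetical layout; the single-channel mask is converted so all tiles share a type):

#include <opencv2/opencv.hpp>

// Tile four equally sized frames into a 2x2 mosaic.
static cv::Mat append4(const cv::Mat& a, const cv::Mat& b,
                       const cv::Mat& c, const cv::Mat& d)
{
    cv::Mat tiles[4] = {a, b, c, d};
    for (int i = 0; i < 4; i++)
        if (tiles[i].channels() == 1)
            cv::cvtColor(tiles[i], tiles[i], cv::COLOR_GRAY2BGR);

    cv::Mat top, bottom, out;
    cv::hconcat(tiles[0], tiles[1], top);
    cv::hconcat(tiles[2], tiles[3], bottom);
    cv::vconcat(top, bottom, out);
    return out;
}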
Code example #7
/** @function main */
int main( int argc, const char** argv )
{
    bool useCamera = true;
    bool useFiles = false;
    String big_directory;
    if(argc > 1){
        useCamera = false;
        useFiles = true;
        big_directory = argv[1];
    }
    VideoCapture cap;
    Mat frame;
    if(useCamera){
        cap.open(0);
        if(!cap.isOpened()){
            cerr<<"Failed to open camera"<<endl;
            return -1;
        }
        while(frame.empty()){
            cap>>frame;
        }
    }
    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ 
        cerr<<"Error loading cascade"<<endl;
        return -1; 
    }    
    vector<string> dirs;
    if(useFiles){
        GetFilesInDirectory(dirs, big_directory);
        cout<<dirs.size()<<endl;
        for(int i = 0; i < dirs.size(); i++){
            cout<<dirs[i]<<endl;
        }
    }
    while(!dirs.empty() || useCamera) {
        vector<string> files;
        string subdir;
        if(useFiles){
            subdir = dirs.back();
            dirs.pop_back();
            GetFilesInDirectory(files, subdir);
        }
        while (true) {
            if (useCamera) {
                cap >> frame;
                if (!frame.empty()) {
                    detectAndDisplay(frame, "camera");
                }
                else {
                    cout << " --(!) No captured frame -- Break!" << endl;
                    break;
                }
            }
            if (useFiles) {
                if (files.empty()) {
                    cout << subdir<<" finished" << endl;
                    break;
                }
                string name = files.back();
                cout << "converting " << name << endl;
                frame = imread(name);
                transpose(frame, frame);
                flip(frame, frame, 1);
                files.pop_back();
                vector<string> splitName;
                splitName = split(subdir, '/');


                detectAndDisplay(frame, splitName.back().c_str());
            }

            int c = waitKey(10);
            if (c == 27) {
                return 0;
            }
        }
    }
    
    return 0;
}
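
face_cascade, detectAndDisplay, GetFilesInDirectory and split are defined elsewhere in this project. A plausible minimal detectAndDisplay, assuming the usual CascadeClassifier workflow (a sketch, not the project's actual code):

#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

extern cv::CascadeClassifier face_cascade;  // loaded in main() as above

// Detect faces in a frame and show them in a window named `tag`.
static void detectAndDisplay(cv::Mat frame, const std::string& tag)
{
    cv::Mat gray;
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale(gray, faces, 1.1, 3, 0, cv::Size(30, 30));

    for (size_t i = 0; i < faces.size(); i++)
        cv::rectangle(frame, faces[i], cv::Scalar(255, 0, 255), 2);
    cv::imshow(tag, frame);
}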
Code example #8
int main(void)
{
	Parameters inParam;
	//	DE_Airport2 -- US_Detroit -- DE_Lehre2
	bool readStatus = readParamRoadScan("../../../../Ford/inVehicle/inVehicle/config/DE_Airport.txt", inParam);
	if( !readStatus )
	{
		cout<<"read parameters error"<<endl;
		return -1;
	}

	// Calculate H and H inverse for road scan and traffic sign detection
	// (only after the parameters were read successfully)
	ns_roadScan::calHAndInvertH(inParam, H, invertH);

	int ChooseVideo = Airport;
	int videoIndex = 0;

	int locNum[2], holeNum[2];

	for(int kk = 0; kk < 6; kk++)	//	Airport2
	{
		VideoCapture capture;
		FILE* gpsFile;

		if(ChooseVideo == Airport2)
		{
			if (videoIndex == 0)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811111511.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811111511.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 1)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811112010.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811112010.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 2)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811112510.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811112510.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 3)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811113010.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811113010.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 4)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811113510.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811113510.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}

			else if (videoIndex == 5)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811114010.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811114010.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}

			else if (videoIndex == 6)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811114510.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811114510.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 7)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811115010.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811115010.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 8)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811115510.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811115510.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 9)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811120010.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811120010.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
			else if (videoIndex == 10)
			{
				capture.open("F:/roadDB/Airport 2/cam_20150811120510.mp4");
				gpsFile = fopen("F:/roadDB/Airport 2/gps/list_20150811120510.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == Airport)
		{
			if (videoIndex == 0)
			{
				capture.open("F:/roadDB/Airport/cam_20150806120920.mp4");
				gpsFile = fopen("F:/roadDB/Airport/gps/list_20150806120920.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == Ford)
		{
			if (videoIndex == 0)
			{
				capture.open("F:/roadDB/Ford/NewcoData/MKS360_20130722_003_Uncompressed.avi");
				gpsFile = fopen("F:/roadDB/Ford/NewcoData/gps_003.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}
		else if(ChooseVideo == VW2)
		{
			if (videoIndex == 0)
			{
				capture.open("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/cam_20150722110100.mp4");
				gpsFile = fopen("C:/Users/ypren/Documents/newco_demo/Demo/Ford/inVehicle/inVehicle/resource/Germany/Lehre2/reverse/cap_20150722110100_cut/list_20150722110100.txt","r");
				capture.set(CV_CAP_PROP_POS_AVI_RATIO, 1);
			}
		}

		int number_of_frames = capture.get(CV_CAP_PROP_POS_FRAMES);

		if ( !capture.isOpened() )  // if not success, exit program
		{
			cout<<"error" <<endl;
			return -1;
		}
		else
		{
			capture.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
			double fps = capture.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video	
		}

		Size S = Size((int) capture.get(CV_CAP_PROP_FRAME_WIDTH), (int) capture.get(CV_CAP_PROP_FRAME_HEIGHT));	

		S.height *= inParam.imageScaleHeight;
		S.width *= inParam.imageScaleWidth;
        
        vector<dataEveryRow> roadPaintData;
		vector<dataEveryRow> roadPaintDataALL;
	    vector<gpsInformationAndInterval> GPSAndInterval;
        
		////////////////////////////////////////////////////////////////////////////
		Mat history = Mat::zeros(S.height *HH*SCALE,S.width, CV_8UC1);
		
		int rowIndex = 0;
		int IntervalTmp = 0;
		int Interval = 0;
		int GPStmp = 0;

		Point2d GPS_next;

        gpsInformationAndInterval gpsAndInterval;
		Mat image;
		int intrtmp = 0;
		int frames = 350;
		vector<Point2d> gps_points;
		
		/* Read GPS points until fscanf stops matching (avoids the feof() off-by-one) */
		while (fscanf(gpsFile,"%lf,%lf\n",&GPS_next.x,&GPS_next.y) == 2)
		{
			gps_points.push_back(GPS_next);
		}
	
		for (int n = 0; n < 150; n++)
		{
			if ((n*frames + 1) > number_of_frames)
			{
				break;
			}

			capture.set(CV_CAP_PROP_POS_FRAMES, n*frames + 1);

			for(int index = 0; index < frames; index++)//number_of_frames
			{
				capture >> image;
				
				if (image.data && ((n*frames + index + 1) < gps_points.size()))
				{
					roadImageGen(image, history, &rowIndex, &gps_points[n*frames+index], &gps_points[n*frames+index+1], &gpsAndInterval, &intrtmp, inParam);
					
					if (gpsAndInterval.intervalOfInterception)
					{
						GPSAndInterval.push_back(gpsAndInterval);
					}
					
					if(((index == (frames - 1)) || ((n*frames + index + 1) == gps_points.size())) && ( !GPSAndInterval.empty()))
					{
						rowIndex -= GPSAndInterval[GPSAndInterval.size()-1].intervalOfInterception;
					}
				}
				else
				{
					break;
				}
			}
			
			Mat historyROI = history(Rect(0, rowIndex, history.cols, history.rows - rowIndex));
			imwrite("historyroi.png", historyROI);
			
			rowIndex = 0;
			intrtmp = 0;
			
			roadImageProc2(historyROI, GPSAndInterval, roadPaintData, inParam);
			
			history = Mat::zeros(S.height*HH*SCALE, S.width, CV_8UC1);
			
			int H = historyROI.rows;
			
			for(int i = 0; i < roadPaintData.size(); i++)
			{
				roadPaintDataALL.push_back(roadPaintData[i]);
			}
			
			roadPaintData.clear();
			GPSAndInterval.clear();
		}

		char texname[32];
		// The file name depends only on videoIndex, whichever video set was chosen.
		sprintf(texname, "dataStruct_%d.txt", videoIndex);

		ofstream dataStruct(texname);
		dataStruct<<setprecision(20)<<inParam.GPSref.x<<" "<<inParam.GPSref.y<<endl;


		for(int i = 0; i<roadPaintDataALL.size(); i++)
		{		
			dataStruct<<setprecision(20)<<roadPaintDataALL[i].Left_Middle_RelGPS.x<<" "<<roadPaintDataALL[i].Left_Middle_RelGPS.y<<" "<<roadPaintDataALL[i].isPaint_Left<<" "
				<<roadPaintDataALL[i].Left_Paint_Edge[0].x<<" "<<roadPaintDataALL[i].Left_Paint_Edge[0].y<<" "
				<<roadPaintDataALL[i].Left_Paint_Edge[1].x<<" "<<roadPaintDataALL[i].Left_Paint_Edge[1].y<<" "
				<<roadPaintDataALL[i].Left_Area_Pixel_Mean<<" "
				<<roadPaintDataALL[i].Middle_RelGPS.x<<" "<<roadPaintDataALL[i].Middle_RelGPS.y<<" "<<roadPaintDataALL[i].Middle_Area_Pixel_Mean<<" "
				<<roadPaintDataALL[i].Right_Middle_RelGPS.x<<" "<<roadPaintDataALL[i].Right_Middle_RelGPS.y<<" "<<roadPaintDataALL[i].isPaint_Right<<" "
				<<roadPaintDataALL[i].Right_Paint_Edge[0].x<<" "<<roadPaintDataALL[i].Right_Paint_Edge[0].y<<" "
				<<roadPaintDataALL[i].Right_Paint_Edge[1].x<<" "<<roadPaintDataALL[i].Right_Paint_Edge[1].y<<" "
				<<roadPaintDataALL[i].Right_Area_Pixel_Mean<<endl;
		}

		////output real middle real GPS
		//vector<Point2d> actualGPS;
		//for(int index = 0; index < roadPaintDataALL.size(); index++)
		//{
		//	Point2d middleRealGPS = Point2d(0.0, 0.0);

		//	//calculate real GPS

		//	if(roadPaintDataALL[index].isPaint_Right == 1)
		//	{
		//		calActualGPSFromRef(roadPaintDataALL[index].Right_Middle_RelGPS, inParam.GPSref, middleRealGPS);
		//	}
		//	else
		//	{
		//		calActualGPSFromRef(roadPaintDataALL[index].Middle_RelGPS, inParam.GPSref, middleRealGPS);
		//	}

		//	actualGPS.push_back(middleRealGPS);
		//}

		//ofstream realGPS("realGPS.txt");
		//for(int index = 0; index < actualGPS.size(); index++)
		//{
		//	realGPS<<setprecision(20)<<actualGPS[index].y<<","<<actualGPS[index].x<<","<<0<<endl;
		//}
		//realGPS.close();
		////end output

		cout<<"output finish"<<endl;
		dataStruct.close();
		roadPaintDataALL.clear();

		videoIndex++;
	}
}
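
The long if/else chain that selects a capture differs only in the timestamp embedded in each file name. A table-driven sketch that removes the duplication (same F:/roadDB layout assumed; timestamps taken from the chain above):

#include <cstdio>
#include <string>
#include <opencv2/opencv.hpp>

// Open the video/GPS pair for `videoIndex` from the Airport 2 set.
static bool openPair(int videoIndex, cv::VideoCapture& capture, FILE*& gpsFile)
{
    static const char* stamps[] = {
        "20150811111511", "20150811112010", "20150811112510",
        "20150811113010", "20150811113510", "20150811114010",
        "20150811114510", "20150811115010", "20150811115510",
        "20150811120010", "20150811120510",
    };
    const int n = (int)(sizeof(stamps) / sizeof(stamps[0]));
    if (videoIndex < 0 || videoIndex >= n)
        return false;

    const std::string base = "F:/roadDB/Airport 2/";
    capture.open(base + "cam_" + stamps[videoIndex] + ".mp4");
    gpsFile = fopen((base + "gps/list_" + stamps[videoIndex] + ".txt").c_str(), "r");
    return capture.isOpened() && gpsFile != NULL;
}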
Code example #9
File: main.cpp  Project: Beknight/ProofOfConcept
int main(int argc, char *argv[])
{
	Mat emptyFrame = Mat::zeros(Camera::reso_height, Camera::reso_width, CV_8UC3);
	Thesis::FastTracking fastTrack(20); // used to be 50, unclear why
	Thesis::KalmanFilter kalman;
	kalman.initialise(CoordinateReal(0, 0, 0));
	kalman.openFile();
	// the two stereoscope images
	Camera one(0,-125,0,0,0,90);
	Camera two(2, 125,0,0,0,90);
	Camera three;
	// list of cameras and cameraLocs
	std::vector<Camera> cameraList;
	std::vector<CoordinateReal> locList;
	VideoWriter writeOne ;
	VideoWriter writeTwo;
	VideoWriter writeThree;
	VideoCapture capOne;
	VideoCapture capTwo;
	VideoCapture capThree;
	Thesis::Stats stat;
	cv::Point2d horizontalOne(0,Camera::reso_height/2);
	cv::Point2d horizontalTwo(Camera::reso_width, Camera::reso_height/2);
	cv::Point2d verticalOne(Camera::reso_width / 2, 0);
	cv::Point2d verticalTwo(Camera::reso_width / 2, Camera::reso_height);
	ofstream framesFile_;
	framesFile_.open("../../../../ThesisImages/fps_ABSDIFF.txt");
	double framesPerSecond = 1 / 10.0;
	//open the recorders
	FeatureExtraction surf(5000);
	Stereoscope stereo;
	Util util;
	bool once = false;
	bool foundInBoth = false;
	bool foundInMono = false;
	std::vector<cv::Point2f> leftRect(4);
	cv::Rect leftRealRect;
	cv::Rect rightRealRect;
	std::vector<cv::Point2f> rightRect(4);
	cv::Mat frameLeft;
	cv::Mat frameRight;
	cv::Mat frameThree;
	cv::Mat prevFrameLeft;
	cv::Mat prevFrameRight;
	cv::Mat prevFrameThree;

	// check whether to run the simulation, normal mode, or recording
	cout << " run simulation: 's' or normal: 'n' or record 'o' or threeCameras 'c' " << endl;
	imshow("main", emptyFrame);
	char command = waitKey(0);

	string left = "../../../../ThesisImages/leftTen.avi";
	string right = "../../../../ThesisImages/rightTen.avi";
	string mid = "../../../../ThesisImages/midTen.avi";
	commands(command);
	emptyFrame = Mat::ones(10, 10, CV_64F);
	imshow("main", emptyFrame);
	command = waitKey(0);
	camCount(command);
	// check the cam count
	if (multiCams){
		//load in all the cameras
		three = Camera(3, 175, -50, 585, 7.1, 97);//Camera(3, 200, -60, 480, 7,111);
	}
	//==========hsv values=======================
	cv::Mat hsvFrame;
	cv::Mat threshold;
	int iLowH = 155;
	int iHighH = 179;

	int iLowS = 75;
	int iHighS = 255;

	int iLowV = 0;
	int iHighV = 255;
	
	//=================================
	double elapsedTime = 0;
	double waitDelta = 0;	
	if (record){
		writeOne.open("../../../../ThesisImages/leftTen.avi", 0, 10, cv::Size(864, 480), true);
		writeTwo.open("../../../../ThesisImages/rightTen.avi", 0, 10, cv::Size(864, 480), true);
		writeThree.open("../../../../ThesisImages/midTen.avi", 0, 10, cv::Size(864, 480), true);
	}else if (simulation){
		capOne.open(left);
		capTwo.open(right);
		capThree.open(mid);
		assert(capOne.isOpened() && capTwo.isOpened());
	}
	 if (hsv){
		//Create trackbars in "Control" window
		cvCreateTrackbar("LowH", "main", &iLowH, 179); //Hue (0 - 179)
		cvCreateTrackbar("HighH", "main", &iHighH, 179);

		cvCreateTrackbar("LowS", "main", &iLowS, 255); //Saturation (0 - 255)
		cvCreateTrackbar("HighS", "main", &iHighS, 255);

		cvCreateTrackbar("LowV", "main", &iLowV, 255); //Value (0 - 255)
		cvCreateTrackbar("HighV", "main", &iHighV, 255);
	}
	if(!simulation){
		cout << " adding" << endl;
		surf.addImageToLib("backToTheFutureCover.jpg");
	}
	CoordinateReal leftLoc;
	CoordinateReal rightLoc;
	CoordinateReal threeLoc;
	while (running){
		clock_t beginTime = clock();
		commands(command);
		if (found){
			kalman.predictState();
			kalman.printCurrentState();
		}
		int thickness = -1;
		int lineType = 8;
		//normal running
		if (!simulation){
			frameLeft = one.grabFrame();
			frameRight = two.grabFrame();
			if (multiCams){
				frameThree = three.grabFrame();
			}
		}
		else{
			 //if last frame, release then reopen
			if (capOne.get(CV_CAP_PROP_POS_FRAMES) == (capOne.get(CV_CAP_PROP_FRAME_COUNT) - 1)){
				capOne.release();
				capTwo.release();
				capOne.open(left);
				capTwo.open(right);
				if (multiCams){
					capThree.release();
					capThree.open(mid);
				}
			}
			// means it is simulation: i.e frames come from a video
			capOne >> frameLeft;
			capTwo >> frameRight;
			if (multiCams){
				capThree >> frameThree;
			}
		}
		if (hsv){
			//convert the frame into hsv
			cvtColor(frameLeft, hsvFrame, COLOR_BGR2HSV);
			inRange(hsvFrame, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), threshold);
			blur(threshold, threshold, cv::Size(20, 20));
			cv::threshold(threshold, threshold, 50, 255, THRESH_BINARY);
			//imshow("imageTwo", hsvFrame);
			imshow("hsv", threshold);
		}
	
		if (record){
			writeOne.write(frameLeft);
			writeTwo.write(frameRight);
			if (multiCams){
				writeThree.write(frameThree);
			}
		}
		if (command == ' '){
			//left frame =============================
			cout << "pressedSpace " << endl;
			std::vector<CoordinateReal> coordLeft = surf.detect(frameLeft, true, found, leftRealRect);
			if (!coordLeft.empty()){
				int thickness = -1;
				int lineType = 8;
				cv::circle(frameLeft, cv::Point2f(coordLeft[0].x(), coordLeft[0].y()), 5,
					cv::Scalar(0, 0, 255),
					thickness,
					lineType);
				leftRect = surf.getSceneCorners();
				line(frameLeft, leftRect[0], leftRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
				line(frameLeft, leftRect[1], leftRect[2], cv::Scalar(0, 0, 255), 2);
				line(frameLeft, leftRect[2], leftRect[3], cv::Scalar(0, 255, 0), 2);
				line(frameLeft, leftRect[3], leftRect[0], cv::Scalar(0, 255, 0), 2);
				leftRealRect = util.getSizedRect(leftRect, one.reso_height, one.reso_width, 0.1);
				leftLoc = coordLeft[0];
			}
			//right frame ==================================
			std::vector<CoordinateReal> coordRight = surf.detect(frameRight, true, found, rightRealRect);
			if (!coordRight.empty()){
				int thickness = -1;
				int lineType = 8;
				cv::circle(frameRight, cv::Point2f(coordRight[0].x(), coordRight[0].y()), 5,
					cv::Scalar(0, 0, 255),
					thickness,
					lineType);
				rightRect = surf.getSceneCorners();
				line(frameRight, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
				line(frameRight, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2);
				line(frameRight, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2);
				line(frameRight, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2);
				rightRealRect = util.getSizedRect(rightRect, one.reso_height, one.reso_width, 0.1);
				rightLoc = coordRight[0];
			}
			if (multiCams){
				std::vector<CoordinateReal> coordThrees = surf.detect(frameThree, true, false, leftRealRect);
				CoordinateReal coordThree = coordThrees[0];
				rightRect = surf.getSceneCorners();
				line(frameThree, rightRect[0], rightRect[1], cv::Scalar(0, 255, 0), 2); //TOP line
				line(frameThree, rightRect[1], rightRect[2], cv::Scalar(0, 0, 255), 2);
				line(frameThree, rightRect[2], rightRect[3], cv::Scalar(0, 255, 0), 2);
				line(frameThree, rightRect[3], rightRect[0], cv::Scalar(0, 255, 0), 2);
				cout << " foundIN x: " << coordThree.x() << "found in y: " << coordThree.y() << endl;
				foundInMono = true;
				threeLoc = coordThree;
			}
			found = true;
			
		}
		else if(!record){
			cout << " fastTracking " << endl;
			if (once){
				CoordinateReal leftCameraLoc(0, 0, 0);
				CoordinateReal rightCameraLoc(0,0,0);
				if (found) {
					leftCameraLoc = kalman.expectedLocObs(one);
					rightCameraLoc = kalman.expectedLocObs(two);
				}
				leftLoc = fastTrack.findObject(frameLeft, prevFrameLeft, leftCameraLoc,leftDebug);
				rightLoc = fastTrack.findObject(frameRight, prevFrameRight, rightCameraLoc ,rightDebug);
				// go through the list of locations 
				if (multiCams){
					CoordinateReal miscCameraLoc(0, 0, 0);
					if (found){
						miscCameraLoc = kalman.expectedLocObs(three);
					}
					threeLoc = fastTrack.findObject(frameThree, prevFrameThree, miscCameraLoc, threeDebug);
				}
			}
			frameLeft.copyTo(prevFrameLeft);
			frameRight.copyTo(prevFrameRight);
			if (multiCams){
				frameThree.copyTo(prevFrameThree);
			}
			once = true;
			cv::circle(frameLeft, cv::Point2f(leftLoc.x(), leftLoc.y()), 5,
				cv::Scalar(0, 0, 255),
				thickness,
				lineType);
			cv::circle(frameRight, cv::Point2f(rightLoc.x(), rightLoc.y()), 5,
				cv::Scalar(0, 0, 255),
				thickness,
				lineType);
			cv::circle(frameThree, cv::Point2f(threeLoc.x(), threeLoc.y()), 5,
				cv::Scalar(0, 0, 255),
				thickness,
				lineType);
		}
		if (multiCams){
			foundInMono = Util::isInFrame(threeLoc);
		}
		foundInBoth = Util::isInBothFrames(leftLoc, rightLoc);
	    
		if (foundInBoth){
			CoordinateReal real = stereo.getLocation(leftLoc, rightLoc);
			//print the current location
			cout << "x: " << real.x() << "y: " << real.y() << "z: " << real.z() << endl;
			//cout << "time in seconds" << float(clock() - beginTime) / CLOCKS_PER_SEC << endl;
			if (!found){
				cout << "initialising kalman filter" << endl;
				kalman.initialise(real);
			}
			else {
				kalman.stereoObservation(real);
			}
			 
			double curTime = double(clock())/CLOCKS_PER_SEC;
			cout << "curTime" << curTime << endl;
			stat.getVel(real, curTime);
			foundInBoth = false;
			found = true;
		}
		if (foundInMono){
			// pass the observation 
			cout << "found in mono" << endl;
			kalman.observation(threeLoc, three);
			foundInMono = false;
		}
		if (cross){
			// add cross to all the frames
			line(frameRight, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2); 
			line(frameRight, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			line(frameLeft, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2);
			line(frameLeft, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			//multi cam
			if (multiCams){
				line(frameThree, horizontalOne, horizontalTwo, cv::Scalar(0, 255, 0), 2);
				line(frameThree, verticalOne, verticalTwo, cv::Scalar(0, 0, 255), 2);
			}
		}
		cv::imshow("left", frameLeft);
		cv::imshow("right", frameRight);
		if (multiCams){
			cv::imshow("mid", frameThree);
		}
		command = waitKey(1);
		if (surfing){
			cout << "wait" << endl;
			waitKey(0);
			surfing = false;
		}
		clock_t end = clock();
		elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC;
		waitDelta = framesPerSecond - elapsedTime;
		if (waitDelta > 0){
			command = waitKey(waitDelta* 1000);
		}
		 end = clock();
		elapsedTime = double(end - beginTime) / CLOCKS_PER_SEC;
		cout << "fps"  << 1 / elapsedTime << endl;
		//convert fps to string
		string fps = std::to_string(1 / elapsedTime);
		fps += "\n";
		framesFile_ << fps;

	}
	framesFile_.close();
	kalman.closeFile();
	return 0;
}
Code example #10
int main(int argc, char *argv[]) {
	ros::init(argc, argv, "verify_tracking_node");
	ros::NodeHandle n;
	std::string port;
	ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
	int baud;
	ros::param::param<int>("~baud", baud, 57600);
	ros::Rate loop_rate(10);

	ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
	ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
	ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);

	ros::Publisher verify_pub = n.advertise<std_msgs::Char>("verify_chatter", 1);

	Subscriber track_sub = n.subscribe("track_chatter", 1, trackCallback);
	Subscriber host_sub = n.subscribe("host_chatter", 1, hostCallback);
	
	cv_result_t cv_result = CV_OK;
	int main_return = -1;
	cv_handle_t handle_detect = NULL;
	cv_handle_t handle_track = NULL;
	VideoCapture capture;
	double time;
	capture.open(0);         // open the camera
	if (!capture.isOpened()) {
		fprintf(stderr, "Verify track can not open camera!\n");
		return -1;
	}
	capStatus = OPEN;
	int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
	int frame_half_width = frame_width >> 1;
	int frame_half_height = frame_height >> 1;
	//printf("width %d height %d \n", frame_width, frame_height);
	Point expect(frame_half_width , frame_half_height);
	struct timeval start0, end0;
	struct timeval start1, end1;
	struct timeval start2, end2;
	struct timeval start3, end3;
	struct timeval start4, end4;
	struct timeval start5, end5;
#ifdef TIME
	gettimeofday(&start0, NULL);
#endif
	cv_handle_t handle_verify = cv_verify_create_handle("data/verify.tar");
#ifdef TIME
	gettimeofday(&end0, NULL);
	time = COST_TIME(start0, end0);
	printf("get from verify tar time cost = %.2fs \n", time / 1000000);
#endif
#if 1
	const int person_number = 3;
	Mat p_image_color_1[person_number], p_image_color_color_1[person_number], p_image_color_2, p_image_color_color_2;
	Mat tmp_frame;
	cv_face_t *p_face_1[person_number];
	cv_face_t *p_face_2;
	int face_count_1[person_number] = {0};
	int face_count_2 = 0;
	cv_feature_t *p_feature_1[person_number];
	cv_feature_t *p_feature_new_1[person_number];
	unsigned int feature_length_1[person_number];
	p_image_color_1[0] = imread("00.JPG");
	p_image_color_1[1] = imread("01.JPG");
	p_image_color_1[2] = imread("02.JPG");
	//p_image_color_1[3] = imread("04.jpg");
	char *string_feature_1[person_number];
#else
	Mat p_image_color_2, p_image_color_color_2;

	const int person_number = 4;
	cv_face_t *p_face_2 = NULL;
	vector<cv_face_t *>p_face_1(person_number,NULL);
	vector<int>face_count_1(person_number, 0);
	int face_count_2 = 0;
	vector<Mat>p_image_color_1(person_number);
	vector<Mat>p_image_color_color_1(person_number);
	vector<cv_feature_t *>p_feature_1(person_number, NULL);
	vector<cv_feature_t *>p_feature_new_1(person_number, NULL);
	vector<unsigned int>feature_length_1(person_number, 0);
	// load image
	p_image_color_1.push_back(imread("01.JPG"));
	p_image_color_1.push_back(imread("02.JPG"));
	p_image_color_1.push_back(imread("03.JPG"));
	p_image_color_1.push_back(imread("04.JPG"));
	char *string_feature_1[person_number];
#endif

	for(int i = 0; i < person_number; i++)
	{
		if (!p_image_color_1[i].data ) {
			fprintf(stderr, "fail to read %d image \n", i);
			//return -1;
			goto RETURN;
		}
	}
	for(int i = 0; i < person_number; i++)
		cvtColor(p_image_color_1[i], p_image_color_color_1[i], CV_BGR2BGRA);
	// init detect handle
	handle_detect = cv_face_create_detector(NULL, CV_FACE_SKIP_BELOW_THRESHOLD | CV_DETECT_ENABLE_ALIGN);
	if (!handle_detect) {
		fprintf(stderr, "fail to init detect handle\n");
		goto RETURN;
		//return -1;
	}
	// detect
#ifdef TIME
	gettimeofday(&start1, NULL);
#endif
	for(int i = 0; i < person_number; i++)
		cv_result = cv_face_detect(handle_detect, p_image_color_color_1[i].data, CV_PIX_FMT_BGRA8888,
				p_image_color_color_1[i].cols, p_image_color_color_1[i].rows, p_image_color_color_1[i].step,
				CV_FACE_UP, &p_face_1[i], &face_count_1[i]);
#ifdef TIME
	gettimeofday(&end1, NULL);
	time = COST_TIME(start1, end1);
	printf("face detect from db time cost = %.2fs \n", time / 1000000);
#endif
	if (cv_result != CV_OK) {
		fprintf(stderr, "st_face_detect error : %d\n", cv_result);
		goto RETURN;
		//return -1;
	}
	for(int i = 0; i < person_number; i++)
	{
		if(face_count_1[i] == 0){
			fprintf(stderr, "can't find face in db %d", i);
			goto RETURN;
		}
	}
	if (handle_verify) {
#ifdef TIME
		gettimeofday(&start2, NULL);
#endif

		for(int i = 0; i < person_number; i++)
			cv_result = cv_verify_get_feature(handle_verify, p_image_color_color_1[i].data, CV_PIX_FMT_BGRA8888,
					p_image_color_color_1[i].cols,
					p_image_color_color_1[i].rows, p_image_color_color_1[i].step, p_face_1[i], &p_feature_1[i],
					&feature_length_1[i]);
#ifdef TIME
		gettimeofday(&end2, NULL);
		time = COST_TIME(start2, end2);
		printf("get feature from db time cost = %.2fs \n", time / 1000000);
#endif
	}
	else {
		fprintf(stderr, "fail to init verify handle, check for the model file!\n");
		goto RETURN;
	}
	for(int i = 0; i < person_number; i++)
	{
		if (feature_length_1[i] > 0) {
			cv_feature_header_t *p_feature_header = CV_FEATURE_HEADER(p_feature_1[i]);
			fprintf(stderr, "Feature information:\n");
			fprintf(stderr, "    ver:\t0x%08x\n", p_feature_header->ver);
			fprintf(stderr, "    length:\t%d bytes\n", p_feature_header->len);

			// test serial and deserial
			string_feature_1[i] = new char[CV_ENCODE_FEATURE_SIZE(p_feature_1[i])];
			cv_verify_serialize_feature(p_feature_1[i], string_feature_1[i]);
			p_feature_new_1[i] = cv_verify_deserialize_feature(string_feature_1[i]);
			delete []string_feature_1[i];
		}
		else {
			fprintf(stderr, "error, the feature length [%d]is 0!\n", i);
		}
	}
	handle_track = cv_face_create_tracker(NULL, CV_FACE_SKIP_BELOW_THRESHOLD);
	if (!handle_track) {
		fprintf(stderr, "fail to init track handle\n");
		goto RETURN;
	}
	//namedWindow("TrackingTest");
	//while (capture.read(p_image_color_2)) {

	while(capture.isOpened()) {
		spinOnce();
		if(host_flag == '0')
			continue;
		for(int i = 0; i < 6; i++)
		{
			capture.read(tmp_frame);
		}
		tmp_frame.copyTo(p_image_color_2); 
		resize(p_image_color_2, p_image_color_2, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
		cvtColor(p_image_color_2, p_image_color_color_2, CV_BGR2BGRA);

#ifdef TIME
		gettimeofday(&start3, NULL);
#endif
		printf("begin to detect from camera\n");
		cv_result = cv_face_detect(handle_detect, p_image_color_color_2.data, CV_PIX_FMT_BGRA8888,
				p_image_color_color_2.cols, p_image_color_color_2.rows, p_image_color_color_2.step,
				CV_FACE_UP, &p_face_2, &face_count_2);
#ifdef TIME
		gettimeofday(&end3, NULL);
		time = COST_TIME(start3, end3);
		printf("face detect from camera time cost = %.2fs \n", time / 1000000);
#endif
		if (cv_result != CV_OK) {
			fprintf(stderr, "st_face_detect error : %d\n", cv_result);
			goto RETURN;
		}


		// verify the first face
		if (face_count_2 > 0) {
			cv_feature_t *p_feature_2 = NULL;
			vector<float>score(person_number, 0);
			unsigned int feature_length_2;
			// get feature
			//printf("begin to get feature from camera\n");
#ifdef TIME
			gettimeofday(&start4, NULL);
#endif
			printf("begin to get feature from camera\n");
			cv_result = cv_verify_get_feature(handle_verify, p_image_color_color_2.data, CV_PIX_FMT_BGRA8888,
					p_image_color_color_2.cols,
					p_image_color_color_2.rows, p_image_color_color_2.step, p_face_2, &p_feature_2,
					&feature_length_2);
#ifdef TIME
			gettimeofday(&end4, NULL);
			time = COST_TIME(start4, end4);
			printf("get feature from camera time cost = %.2fs \n", time / 1000000);
#endif

			if ( feature_length_2 > 0) {
				char *string_feature_2 = new char[CV_ENCODE_FEATURE_SIZE(p_feature_2)];
				cv_verify_serialize_feature(p_feature_2, string_feature_2);
				cv_feature_t *p_feature_new_2 = cv_verify_deserialize_feature(string_feature_2);
				delete []string_feature_2;

				// compare feature
#ifdef TIME
				gettimeofday(&start5, NULL);
#endif
				printf("begin to compare feature with db\n");
				for(int i = 0; i < person_number; i++)
				{
					cv_result = cv_verify_compare_feature(handle_verify, p_feature_new_1[i],
							p_feature_new_2, &score[i]);
				}
#ifdef TIME
				gettimeofday(&end5, NULL);
				time = COST_TIME(start5, end5);
				printf("compare feature time cost = %.2fms \n", time / 1000);
#endif
				if (cv_result == CV_OK) {
					float max_score = score[0];
					int max_id = 0;
					for(int i = 1; i < person_number; i++)
					{
						if(score[i] > max_score)
						{
							max_score = score[i];
							max_id = i;
						}
					}

					fprintf(stderr, "max score: %f\n", max_score);
					// compare score with DEFAULT_THRESHOLD
					// > DEFAULT_THRESHOLD => the same person
					// < DEFAULT_THRESHOLD => different people
					if (max_score > DEFAULT_THRESHOLD)
					{
						fprintf(stderr, "you are the right person, your number is %d\n", max_id);
						capStatus = Verified;
						// send verify_flag msg to verify chatter
						verify_flag = '1';
						verify.data = verify_flag;
						verify_pub.publish(verify);
						//printf("verify node publish verify flag %c to speech node\n", verify_flag);
						spinOnce();
						printf("track flag %c\n", track_flag);
						if(track_flag == '0')
							continue;
						int track_value = face_track(capture, expect, frame_width, frame_height, handle_track, servox_pub, servoy_pub,motor_pub);
						if(track_value == -1)
						{
							printf("no face detected! verify from the start!\n");
							verify_flag = '0';
							verify.data = verify_flag;
							verify_pub.publish(verify);
							track_flag = '0';
							continue;
						}
					}
					else
					{
						fprintf(stderr, "no, you are not the right person.\n");
						verify_flag = '0';
						verify.data = verify_flag;
						verify_pub.publish(verify);
						track_flag = '0';
					}
				} else {
					fprintf(stderr, "cv_verify_compare_feature error : %d\n", cv_result);
				}
				cv_verify_release_feature(p_feature_new_2);
			} else {
				fprintf(stderr, "error, the feature length is 0!\n");
			}
			cv_verify_release_feature(p_feature_2);
		} else {
Code example #11
/**
 * Canny Edge Detector.
 *
 * argv[1] = source file or will default to "../../resources/traffic.mp4" if no
 * args passed.
 *
 * @author sgoldsmith
 * @version 1.0.0
 * @since 1.0.0
 */
int main(int argc, char *argv[]) {
	int return_val = 0;
	string url = "../../resources/traffic.mp4";
	string output_file = "../../output/canny-cpp.avi";
	cout << CV_VERSION << endl;
	cout << "Press [Esc] to exit" << endl;
	VideoCapture capture;
	Mat image;
	// See if URL arg passed
	if (argc == 2) {
		url = argv[1];
	}
	cout << "Input file:" << url << endl;
	cout << "Output file:" << output_file << endl;
	capture.open(url);
	// See if video capture opened
	if (capture.isOpened()) {
		cout << "Resolution: " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << "x"
				<< capture.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
		bool exit_loop = false;
		// Video writer
		VideoWriter writer(output_file, (int) capture.get(CAP_PROP_FOURCC),
				(int) capture.get(CAP_PROP_FPS),
				Size((int) capture.get(CAP_PROP_FRAME_WIDTH),
						(int) capture.get(CAP_PROP_FRAME_HEIGHT)));
		Mat gray_img;
		Mat blur_img;
		Mat edges_img;
		Mat dst_img;
		Size kSize = Size(3, 3);
		int frames = 0;
		timeval start_time;
		gettimeofday(&start_time, 0);
		// Process all frames
		while (capture.read(image) && !exit_loop) {
			if (!image.empty()) {
				// Convert the image to grayscale
				cvtColor(image, gray_img, COLOR_BGR2GRAY);
				// Reduce noise with a kernel 3x3
				GaussianBlur(gray_img, blur_img, kSize, 0);
				// Canny detector
				Canny(blur_img, edges_img, 100, 200, 3, false);
				// Add some colors to edges from original image
				bitwise_and(image, image, dst_img, edges_img);
				// Write frame with motion rectangles
				writer.write(dst_img);
				// Make sure we get new matrix
				dst_img.release();
				frames++;
			} else {
				cout << "No frame captured" << endl;
				exit_loop = true;
			}
		}
		timeval end_time;
		gettimeofday(&end_time, 0);
		cout << frames << " frames" << endl;
		cout << "FPS " << (frames / (end_time.tv_sec - start_time.tv_sec))
				<< ", elapsed time: " << (end_time.tv_sec - start_time.tv_sec)
				<< " seconds" << endl;
		// Release VideoWriter
		writer.release();
		// Release VideoCapture
		capture.release();
	} else {
		cout << "Unable to open device" << endl;
		return_val = -1;
	}
	return return_val;
}
Code example #12
File: cascadeclassifier.cpp  Project: ArkaJU/opencv
int main(int argc, const char *argv[])
{
    if (argc == 1)
    {
        help();
        return -1;
    }

    if (getCudaEnabledDeviceCount() == 0)
    {
        return cerr << "No GPU found or the library is compiled without CUDA support" << endl, -1;
    }

    cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());

    string cascadeName;
    string inputName;
    bool isInputImage = false;
    bool isInputVideo = false;
    bool isInputCamera = false;

    for (int i = 1; i < argc; ++i)
    {
        if (string(argv[i]) == "--cascade")
            cascadeName = argv[++i];
        else if (string(argv[i]) == "--video")
        {
            inputName = argv[++i];
            isInputVideo = true;
        }
        else if (string(argv[i]) == "--camera")
        {
            inputName = argv[++i];
            isInputCamera = true;
        }
        else if (string(argv[i]) == "--help")
        {
            help();
            return -1;
        }
        else if (!isInputImage)
        {
            inputName = argv[i];
            isInputImage = true;
        }
        else
        {
            cout << "Unknown key: " << argv[i] << endl;
            return -1;
        }
    }

    Ptr<cuda::CascadeClassifier> cascade_gpu = cuda::CascadeClassifier::create(cascadeName);

    cv::CascadeClassifier cascade_cpu;
    if (!cascade_cpu.load(cascadeName))
    {
        return cerr << "ERROR: Could not load cascade classifier \"" << cascadeName << "\"" << endl, help(), -1;
    }

    VideoCapture capture;
    Mat image;

    if (isInputImage)
    {
        image = imread(inputName);
        CV_Assert(!image.empty());
    }
    else if (isInputVideo)
    {
        capture.open(inputName);
        CV_Assert(capture.isOpened());
    }
    else
    {
        capture.open(atoi(inputName.c_str()));
        CV_Assert(capture.isOpened());
    }

    namedWindow("result", 1);

    Mat frame, frame_cpu, gray_cpu, resized_cpu, frameDisp;
    vector<Rect> faces;

    GpuMat frame_gpu, gray_gpu, resized_gpu, facesBuf_gpu;

    /* parameters */
    bool useGPU = true;
    double scaleFactor = 1.0;
    bool findLargestObject = false;
    bool filterRects = true;
    bool helpScreen = false;

    for (;;)
    {
        if (isInputCamera || isInputVideo)
        {
            capture >> frame;
            if (frame.empty())
            {
                break;
            }
        }

        (image.empty() ? frame : image).copyTo(frame_cpu);
        frame_gpu.upload(image.empty() ? frame : image);

        convertAndResize(frame_gpu, gray_gpu, resized_gpu, scaleFactor);
        convertAndResize(frame_cpu, gray_cpu, resized_cpu, scaleFactor);

        TickMeter tm;
        tm.start();

        if (useGPU)
        {
            cascade_gpu->setFindLargestObject(findLargestObject);
            cascade_gpu->setScaleFactor(1.2);
            cascade_gpu->setMinNeighbors((filterRects || findLargestObject) ? 4 : 0);

            cascade_gpu->detectMultiScale(resized_gpu, facesBuf_gpu);
            cascade_gpu->convert(facesBuf_gpu, faces);
        }
        else
        {
            Size minSize = cascade_gpu->getClassifierSize();
            cascade_cpu.detectMultiScale(resized_cpu, faces, 1.2,
                                         (filterRects || findLargestObject) ? 4 : 0,
                                         (findLargestObject ? CASCADE_FIND_BIGGEST_OBJECT : 0)
                                            | CASCADE_SCALE_IMAGE,
                                         minSize);
        }

        for (size_t i = 0; i < faces.size(); ++i)
        {
            rectangle(resized_cpu, faces[i], Scalar(255));
        }

        tm.stop();
        double detectionTime = tm.getTimeMilli();
        double fps = 1000 / detectionTime;

        //print detections to console
        cout << setfill(' ') << setprecision(2);
        cout << setw(6) << fixed << fps << " FPS, " << faces.size() << " det";
        if ((filterRects || findLargestObject) && !faces.empty())
        {
            for (size_t i = 0; i < faces.size(); ++i)
            {
                cout << ", [" << setw(4) << faces[i].x
                     << ", " << setw(4) << faces[i].y
                     << ", " << setw(4) << faces[i].width
                     << ", " << setw(4) << faces[i].height << "]";
            }
        }
        cout << endl;

        cv::cvtColor(resized_cpu, frameDisp, COLOR_GRAY2BGR);
        displayState(frameDisp, helpScreen, useGPU, findLargestObject, filterRects, fps);
        imshow("result", frameDisp);

        char key = (char)waitKey(5);
        if (key == 27)
        {
            break;
        }

        switch (key)
        {
        case ' ':
            useGPU = !useGPU;
            break;
        case 'm':
        case 'M':
            findLargestObject = !findLargestObject;
            break;
        case 'f':
        case 'F':
            filterRects = !filterRects;
            break;
        case '1':
            scaleFactor *= 1.05;
            break;
        case 'q':
        case 'Q':
            scaleFactor /= 1.05;
            break;
        case 'h':
        case 'H':
            helpScreen = !helpScreen;
            break;
        }
    }

    return 0;
}
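convertAndResize() is a helper from this sample that is not shown here; judging by its use on both Mat and GpuMat, it presumably converts to grayscale and rescales. A sketch of the CPU overload (the GpuMat overload would use cv::cuda::cvtColor and cv::cuda::resize):

static void convertAndResize(const Mat& src, Mat& gray, Mat& resized, double scale)
{
    if (src.channels() == 3)
        cvtColor(src, gray, COLOR_BGR2GRAY);   // color input: convert to grayscale
    else
        gray = src;                            // already single-channel

    Size sz(cvRound(gray.cols * scale), cvRound(gray.rows * scale));
    if (scale != 1)
        resize(gray, resized, sz);
    else
        resized = gray;
}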
コード例 #13
0
int main() 
{
	string fileName = "traffic.avi";
	VideoCapture capture;
	capture.open(fileName);		//Video capture from hard disk (.avi) or from camera
	if( !capture.isOpened() )
	{	
		cerr<<"video opening error\n"; waitKey(0); system("pause"); return -1;
	}

	Mat frameImg_origSize;							//image taken from camera feed in original size
	namedWindow( "out"	  , CV_WINDOW_AUTOSIZE);	//window to show output
	namedWindow( "trackbar", CV_WINDOW_AUTOSIZE);	//Trackbars to change value of parameters
	resizeWindow( "trackbar", 300, 600);			//Resizing trackbar window for proper view of all the parameters
	
	
	capture>>frameImg_origSize; // Just to know original size of video
	if( frameImg_origSize.empty() ) { cout<<"failed to read the first frame"; }
	

	resize(frameImg_origSize, frameImg, Size(WIDTH_SMALL, HEIGHT_SMALL), 0, 0, CV_INTER_AREA);	//Resize original frame into smaller frame for faster calculations

	Size origSize = frameImg_origSize.size();	//original size
	cout<<"ORIG: size = "<<frameImg_origSize.cols
		<<" X "<<frameImg_origSize.rows
		<<" step "<<frameImg_origSize.step
		<<" nchannels "<<frameImg_origSize.channels()<<endl;	//print original size: width, height, widthStep, no of channels.

	g_image = Mat(Size(WIDTH_SMALL, HEIGHT_SMALL), CV_8UC1);	g_image.setTo(0);	//Gray image of frameImg
	//frameData  = (char*)frameImg ->imageData;	//Data of frameImg
	//calibIntensity();	//Average Intensity of all pixels in the image

	//cout<<"calibintensity\n";
	Mat roadImage = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL), CV_8UC3);	//Image of the road (without vehicles)
	roadImage = findRoadImage();	//Image of the road
	
	cout<<"roadimage\n";
	//char* roadImageData = (char*)roadImage->imageData;	//Data of roadImage
	calibPolygon();	//Polygon caliberation: Select four points of polygon clockwise and press enter

	cout<<"polyArea = "<<polyArea;	//Area of selected polygon
	Mat binImage = Mat(Size(WIDTH_SMALL, HEIGHT_SMALL),CV_8UC1);	//white pixel = cars, black pixel = other than cars
	//char* binImageData = (char*)binImage->imageData;	//data of binImage
	Mat finalImage = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL), CV_8UC3);	//final image to show output

	double T = time(0);	//Current time
	float fps = 0, lastCount = 0;	//frames per second

	int thresh_r = 43, thresh_g = 43, thresh_b = 49;						//Threshold parameters for Red, Green, Blue colors
	createTrackbar( "Red Threshold", "trackbar", &thresh_r, 255, 0 );		//Threshold for Red color
	createTrackbar( "Green Threshold", "trackbar", &thresh_g, 255, 0 );		//Threshold for Green color
	createTrackbar( "Blue Threshold", "trackbar", &thresh_b, 255, 0 );		//Threshold for Blue color
	int dilate1=1, erode1=2, dilate2=5;	//Dilate and Erode parameters
	Mat imgA = Mat(Size(WIDTH_SMALL,HEIGHT_SMALL),CV_8SC3);//Used for opticalFlow
	//CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];	//Input points for opticalFlow
	//CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];	//Output points from opticalFlow
	vector<Point2f> cornersA, cornersB;

	frameImg.copyTo(imgA);//cvCopyImage(frameImg,imgA);	//copy from frameImg to imgA
	
	int win_size = 20;	//parameter for opticalFlow
	int corner_count = MAX_CORNERS;	//no of points tracked in opticalFlow
	//Mat pyrA;// = cvCreateImage( size(WIDTH_SMALL,HEIGHT_SMALL), IPL_DEPTH_32F, 1 );	//Temp image (opticalFlow)
	//Mat pyrB;// = cvCreateImage( size(WIDTH_SMALL,HEIGHT_SMALL), IPL_DEPTH_32F, 1 );	//Temp image (opticalFlow)
	double distance;	//Length of lines tracked by opticalFlow
	int maxArrowLength = 100, minArrowLength = 0;	//div by 10 //Max and Min length of the tracked lines
	int arrowGap = 5;	//distance between consecutive tracking points (opticalFlow)
	createTrackbar("max arrow length", "trackbar", &maxArrowLength, 100, 0);	//Manually change max length of tracked lines
	createTrackbar("min arrow length", "trackbar", &minArrowLength, 100, 0);	//Manually change min length of tracked lines
	createTrackbar("dilate 1","trackbar", &dilate1, 15, 0);	//first dilate
	createTrackbar("erode 1","trackbar", &erode1, 15, 0);		//first erode
	createTrackbar("dilate 2","trackbar", &dilate2, 15, 0);	//second dilate
	char features_found[ MAX_CORNERS ];	//temp data (opticalFlow)
	float feature_errors[ MAX_CORNERS ];//temp data (opticalFlow)
	Mat dilate1_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * dilate1 + 1, 2 * dilate1 + 1), Point(-1,-1) );
	Mat erode1_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * erode1 + 1, 2 * erode1 + 1), Point(-1,-1) );
	Mat dilate2_element = getStructuringElement(MORPH_ELLIPSE , Size(2 * dilate2 + 1, 2 * dilate2 + 1), Point(-1,-1) );
	
	vector< Vec4i > hierarchy;
	vector< vector<Point> > contours;
	vector< uchar > vstatus; 
	vector< float >verror;

	//////////////////////////////////////////////////////////////////////////
	while(true) //Loop until the video runs out of frames
	{
		++fps;	//calculation of Frames Per Second
		capture>>frameImg_origSize; //Store image in original size
		if( frameImg_origSize.empty() ) break; //if there is no frame available (end of buffer); stop.
		resize(frameImg_origSize, frameImg, frameImg.size()); //resize original image into smaller image for fast calculation
		imshow("video", frameImg);
		
		// Iterate through the whole frame and compare it with the road image;
		// if the difference exceeds the thresholds, the pixel belongs to a vehicle
		for( int i=0; i<HEIGHT_SMALL; ++i)
		{
			for(int j=0; j<WIDTH_SMALL; ++j)
			{
				//X = i*WIDTH_STEP_SMALL+j*NCHANNELS;
				// OpenCV stores pixels in BGR order: index 0 = blue, 1 = green, 2 = red
				if(	abs(roadImage.at<Vec3b>(i,j)[2]-frameImg.at<Vec3b>(i,j)[2])<thresh_r &&
					abs(roadImage.at<Vec3b>(i,j)[1]-frameImg.at<Vec3b>(i,j)[1])<thresh_g &&
					abs(roadImage.at<Vec3b>(i,j)[0]-frameImg.at<Vec3b>(i,j)[0])<thresh_b ) //compare frame against road image using per-channel thresholds
				{	binImage.at<uchar>(i,j) = 0;	}	//not a vehicle (black)
				else
				{	binImage.at<uchar>(i,j) = 255;	}	//vehicle (white)
		    }
		}
		
		frameImg.copyTo(finalImage);
		
		bitwise_and(binImage, polygonImg, binImage, noArray());	//Quadrilateral Cropping

		imshow("bin image", binImage);
		//int dilate1 = 4;
		
		dilate(binImage, binImage, dilate1_element);
		erode(binImage, binImage, erode1_element);
		dilate(binImage, binImage, dilate2_element);
		imshow("noise removed", binImage);

		//////////////////////////////////////////////////////////////////////////
		binImage.copyTo(g_image);
		
		
		findContours( g_image, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
		/// finds contours. g_image = input image (findContours modifies it),
		/// CV_RETR_LIST = retrieve all contours without hierarchy, CV_CHAIN_APPROX_SIMPLE = contour compression method
		double  percentArea = 0; // % of area occupied by vehicles from the area of polygon
		double contoursArea = 0;

		cout<<"("<<contours.size()<<") ";


		for(int idx=0; idx<contours.size(); idx++)
		{
			// cout<<"idx = "<<idx;
			if( !contours.at(idx).empty() )	
			{
				contoursArea += contourArea( contours.at(idx) );
			}
			// cout<<" area = "<<contoursArea<<endl;
			Scalar color( rand()&255, rand()&255, rand()&255 );
			drawContours(finalImage, contours, idx, color);
		}
		imshow("contour drawing", finalImage);

		contours.clear();
		hierarchy.clear();

		percentArea = contoursArea/polyArea;
		cout<<(int)(percentArea*100)<<"% ";

		// ---------------------------------------------------------------------------------------------------------------------------
		int xCorners = 0; //No of points to be tracked by opticalFlow
		for(int i=0; i<HEIGHT_SMALL; i+=arrowGap) //preparing input points to be tracked
		{
			for(int j=0; j<WIDTH_SMALL; j+=arrowGap)
			{
				if( xCorners >= MAX_CORNERS-1 ) break; //no of points must not exceed MAX_CORNERS
				if( binImage.at<uchar>(i,j) == 255 )
				//if( binImageData[i*WIDTH_SMALL + j] == 255 ) //points must be chosen only on the vehicles (white pixels)
				{
					cornersA.push_back(Point2f((float)j, (float)i)); // x = column, y = row
					//cornersA[xCorners].x = j;
					//cornersA[xCorners].y = i;
					++xCorners;
				}
			}
		}
		cornersB.reserve(xCorners);
		//if( percentArea>80.0 || fps<=4 )	arrowGap=15; //reduce point density if processor is loaded
		//else if( percentArea>40.0 || fps<=7 )	arrowGap=10;
		//else	arrowGap=5;
		//x corner_count = xCorners; //no of points to be tracked
		
		
		calcOpticalFlowPyrLK(imgA,frameImg,cornersA,cornersB,vstatus, verror, Size( win_size,win_size ),5,cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),0); //calculates opticalFlow
		/// imgA = previous image; frameImg = current image; cornersA = input points; cornersB = output points; Rest is not important
			
		int xCornersInRange = 1; // number of points within the min/max length range (starts at 1 to avoid division by zero)
		double avgDist = 0; // average length of the tracked lines = average movement of the vehicles
		for( int i=0; i<xCorners; i++ ) //iterate through all tracking points
		{
			distance = dist(cornersA[i], cornersB[i]); //length of tracked lines = magnitude of movement of vehicle
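			// dist() is a project helper not shown in this snippet; it presumably
			// returns the Euclidean distance, e.g. cv::norm(cornersB[i] - cornersA[i])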
			//if( distance < maxArrowLength/10 && distance > minArrowLength/10) //only accept points which lies in Min-Max range
			{
				++xCornersInRange;
				avgDist += distance; //add length of all lines
				line( finalImage, Point(cornersA[i].x,cornersA[i].y), Point(cornersB[i].x,cornersB[i].y) , CV_RGB(0,0,255),1 , CV_AA); //draw all tracking  lines
			}
		}
		avgDist /= xCornersInRange; //average length of lines
		cout<<avgDist;
		frameImg.copyTo(imgA);

		cornersA.clear();
		cornersB.clear();
		vstatus.clear();
		verror.clear();

		//cvCopyImage(frameImg,imgA); //current image frameImg will be previous image imgA for the next frame
		//////////////////////////////////////////////////////////////////////////
		line(finalImage, pts[0], pts[1], CV_RGB(0,255,0),1,CV_AA); //draw polygon in final image (Green)
		line(finalImage, pts[1], pts[2], CV_RGB(0,255,0),1,CV_AA);
		line(finalImage, pts[2], pts[3], CV_RGB(0,255,0),1,CV_AA);
		line(finalImage, pts[3], pts[0], CV_RGB(0,255,0),1,CV_AA);
		imshow( "out", finalImage); // show final output image
		waitKey(33);
		if(time(0) >= T+1)
		{
			cout<<" ["<<fps<<"]";
			fps = 0;
			T = time(0);
		}
		cout<<endl;
	}
	cout<<"\nFINISH\n";
	return 0;
}
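The per-pixel differencing loop in this example can also be written with OpenCV primitives, which is both shorter and faster. A sketch under the same assumptions (roadImage and frameImg are CV_8UC3 of equal size; a pixel counts as a vehicle when any channel differs from the road image by at least its threshold; in BGR order channel 0 is blue and channel 2 is red):

Mat diff, chans[3];
absdiff(frameImg, roadImage, diff);    // per-channel absolute difference
split(diff, chans);
binImage = (chans[0] >= thresh_b) | (chans[1] >= thresh_g) | (chans[2] >= thresh_r);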
コード例 #14
0
ファイル: camshiftdemo.cpp プロジェクト: SCS-B3C/OpenCV2-2
int main( int argc, char** argv )
{
    VideoCapture cap;
    Rect trackWindow;
    RotatedRect trackBox;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
    else if( argc == 2 )
        cap.open(argv[1]);

    if( !cap.isOpened() )
    {
    	help();
        cout << "***Could not initialize capturing...***\n";
        return 0;
    }

    help();

    namedWindow( "Histogram", 1 );
    namedWindow( "CamShift Demo", 1 );
    setMouseCallback( "CamShift Demo", onMouse, 0 );
    createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
    
    for(;;)
    {
        Mat frame;
        cap >> frame;
        if( frame.empty() )
            break;

        frame.copyTo(image);
        cvtColor(image, hsv, CV_BGR2HSV);

        if( trackObject )
        {
            int _vmin = vmin, _vmax = vmax;

            inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                    Scalar(180, 256, MAX(_vmin, _vmax)), mask);
            int ch[] = {0, 0};
            hue.create(hsv.size(), hsv.depth());
            mixChannels(&hsv, 1, &hue, 1, ch, 1);

            if( trackObject < 0 )
            {
                Mat roi(hue, selection), maskroi(mask, selection);
                calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                normalize(hist, hist, 0, 255, CV_MINMAX);
                
                trackWindow = selection;
                trackObject = 1;

                histimg = Scalar::all(0);
                int binW = histimg.cols / hsize;
                Mat buf(1, hsize, CV_8UC3);
                for( int i = 0; i < hsize; i++ )
                    buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                cvtColor(buf, buf, CV_HSV2BGR);
                    
                for( int i = 0; i < hsize; i++ )
                {
                    int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                    rectangle( histimg, Point(i*binW,histimg.rows),
                               Point((i+1)*binW,histimg.rows - val),
                               Scalar(buf.at<Vec3b>(i)), -1, 8 );
                }
            }

            calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
            backproj &= mask;
            trackBox = CamShift(backproj, trackWindow,
                                TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));

            if( backprojMode )
                cvtColor( backproj, image, CV_GRAY2BGR );
            ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
        }

        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
            bitwise_not(roi, roi);
        }

        imshow( "CamShift Demo", image );
        imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 )
            break;
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    return 0;
}
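This demo relies on globals (image, selection, selectObject, trackObject, vmin, vmax, smin) and an onMouse() callback defined elsewhere in the sample. A sketch of the usual rubber-band selection handler it assumes:

static bool selectObject = false;
static int  trackObject = 0;
static Point origin;
static Rect selection;

static void onMouse( int event, int x, int y, int, void* )
{
    if( selectObject )
    {
        // update the rubber-band rectangle while dragging
        selection.x = MIN(x, origin.x);
        selection.y = MIN(y, origin.y);
        selection.width = std::abs(x - origin.x);
        selection.height = std::abs(y - origin.y);
        selection &= Rect(0, 0, image.cols, image.rows);
    }

    switch( event )
    {
    case CV_EVENT_LBUTTONDOWN:
        origin = Point(x, y);
        selection = Rect(x, y, 0, 0);
        selectObject = true;
        break;
    case CV_EVENT_LBUTTONUP:
        selectObject = false;
        if( selection.width > 0 && selection.height > 0 )
            trackObject = -1;   // trigger histogram (re)computation in the main loop
        break;
    }
}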
コード例 #15
0
int main()
{
	int num1 = 0;
	int num2 = 0;
	int result;
	char key;
	char command = '@';
	bool SecondNumPressed = false;


	VideoCapture cap;
	cap.open(0);
	if (!cap.isOpened())
	{
		system("CLS");
		printf("\n\n\t\t\tcamera disconnected");
		system("PAUSE");
		return -1;
	}
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);


	system("CLS");
	printf("\n\n\t\t\t0");
	while (true)
	{
		key = press(cap);
		if ((SecondNumPressed == true) && ((key != '=') && (((key - '0')<0) || ((key - '0')>9))))
		{
			//cout << SecondNumPressed << " " << key << " " << key - '0' << "\n";
			continue;
		}
		if (key == '=')
		{
			system("CLS");

			if ((num2 == 0) && (command == '/'))
				printf("\n\n\t\t\tcan not devide with zero");
			else if (SecondNumPressed == true)
				printf("\n\n\t\t\t%d", result);
			else
				printf("\n\n\t\t\t%d", num1);
			num1 = 0;
			num2 = 0;
			command = '@';
			SecondNumPressed = false;
		}
		else if (key == 'c')
		{
			system("CLS");
			num1 = 0;
			num2 = 0;
			command = '@';
			printf("\n\n\t\t\t0");
			SecondNumPressed = false;
		}
		else if ((key == '+') || (key == '*') || (key == '-') || (key == '/'))
		{
			if (command != '@')
			{
				system("CLS");
				printf("\n\n\t\t\t%d", num1);
			}
			printf(" %c ", key);
			command = key;
		}
		else if (command == '@')
		{
			system("CLS");
			if (key == '<')
				num1 = num1 - num1 % 10;
			else
				num1 = num1 * 10 + (key - '0');
			printf("\n\n\t\t\t%d", num1);
		}
		else
		{
			system("CLS");
			if (key == '<')
				num2 = num2 - num2 % 10;
			else
			{
				num2 = num2 * 10 + (key - '0');
				SecondNumPressed = true;
			}
			printf("\n\n\t\t\t%d %c %d", num1, command, num2);
			if (command == '+')
				result = num1 + num2;
			else if (command == '-')
				result = num1 - num2;
			else if (command == '*')
				result = num1 * num2;
			else if (command == '/')
			if (num2 != 0)
				result = num1 / num2;
		}
	}
}
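press(cap) is this example's vision-based key detector and is not shown. To exercise the calculator logic without the vision part, a hypothetical keyboard-driven stand-in could look like this:

// Hypothetical stand-in for press(): show the camera frame, read a key from the keyboard
char press(VideoCapture& cap)
{
	Mat frame;
	cap >> frame;
	if (!frame.empty())
		imshow("camera", frame);
	return (char)waitKey(0);
}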
コード例 #16
0
ファイル: tvl1_optical_flow.cpp プロジェクト: 93sam/opencv
int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h help     | false           | print help message }"
        "{ l left     |                 | specify left image }"
        "{ r right    |                 | specify right image }"
        "{ o output   | tvl1_output.jpg | specify output save path }"
        "{ c camera   | 0               | enable camera capturing }"
        "{ m cpu_mode | false           | run without OpenCL }"
        "{ v video    |                 | use video as input }";

    CommandLineParser cmd(argc, argv, keys);

    if (cmd.has("help"))
    {
        cout << "Usage: pyrlk_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }

    string fname0 = cmd.get<string>("l");
    string fname1 = cmd.get<string>("r");
    string vdofile = cmd.get<string>("v");
    string outpath = cmd.get<string>("o");
    bool useCPU = cmd.get<bool>("s");
    bool useCamera = cmd.get<bool>("c");
    int inputName = cmd.get<int>("c");

    UMat frame0, frame1;
    imread(fname0, cv::IMREAD_GRAYSCALE).copyTo(frame0);
    imread(fname1, cv::IMREAD_GRAYSCALE).copyTo(frame1);
    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();

    UMat flow;
    Mat show_flow;
    vector<UMat> flow_vec;
    if (frame0.empty() || frame1.empty())
        useCamera = true;

    if (useCamera)
    {
        VideoCapture capture;
        UMat frame, frameCopy;
        UMat frame0Gray, frame1Gray;
        UMat ptr0, ptr1;

        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());

        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << inputName << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" <<endl;
            goto nocamera;
        }

        cout << "In capture ..." << endl;
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;

            if (i == 0)
            {
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }

                alg->calc(ptr0, ptr1, flow);
                split(flow, flow_vec);

                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("tvl1 optical flow field", show_flow);
            }

            char key = (char)waitKey(10);
            if (key == 27)
                break;
            else if (key == 'm' || key == 'M')
            {
                ocl::setUseOpenCL(!cv::ocl::useOpenCL());
                cout << "Switched to " << (ocl::useOpenCL() ? "OpenCL" : "CPU") << " mode\n";
            }
        }

        capture.release();
    }
    else
    {
nocamera:
        if (cmd.has("cpu_mode"))
        {
            ocl::setUseOpenCL(false);
            std::cout << "OpenCL was disabled" << std::endl;
        }
        for(int i = 0; i <= LOOP_NUM; i ++)
        {
            cout << "loop" << i << endl;

            if (i > 0) workBegin();

            alg->calc(frame0, frame1, flow);
            split(flow, flow_vec);

            if (i > 0 && i <= LOOP_NUM)
                workEnd();

            if (i == LOOP_NUM)
            {
                if (useCPU)
                    cout << "average CPU time (noCamera) : ";
                else
                    cout << "average GPU time (noCamera) : ";
                cout << getTime() / LOOP_NUM << " ms" << endl;

                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("PyrLK [Sparse]", show_flow);
                imwrite(outpath, show_flow);
            }
        }
    }

    waitKey();

    return EXIT_SUCCESS;
}
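workBegin(), workEnd() and getTime() are small timing helpers defined elsewhere in the sample; a sketch of equivalent definitions built on cv::getTickCount():

static int64  work_begin    = 0;
static double work_total_ms = 0;

static void workBegin() { work_begin = cv::getTickCount(); }
static void workEnd()
{
    work_total_ms += (cv::getTickCount() - work_begin) * 1000.0 / cv::getTickFrequency();
}
static double getTime() { return work_total_ms; }   // accumulated milliseconds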
コード例 #17
0
ファイル: benchmark.cpp プロジェクト: GEO-IASS/opencv_contrib
static AssessmentRes assessment(char* video,char* gt_str, char* algorithms[],char* initBoxes_str[],int algnum){
  char buf[200];
  int start_frame=0;
  int linecount=0;
  Rect2d boundingBox;
  vector<double> averageMillisPerFrame(algnum,0.0);

  FILE* gt=fopen(gt_str,"r");
  if(gt==NULL){
      printf("cannot open the ground truth file %s\n",gt_str);
      exit(EXIT_FAILURE);
  }
  for(linecount=0;fgets(buf,sizeof(buf),gt)!=NULL;linecount++);
  if(linecount==0){
      printf("ground truth file %s has no lines\n",gt_str);
      exit(EXIT_FAILURE);
  }
  fseek(gt,0,SEEK_SET);
  if(fgets(buf,sizeof(buf),gt)==NULL){
      printf("ground truth file %s has no lines\n",gt_str);
      exit(EXIT_FAILURE);
  }

  std::vector<Rect2d> initBoxes(algnum);
  for(int i=0;i<algnum;i++){
      printf("%s %s\n",algorithms[i],initBoxes_str[CMDLINEMAX*i]);
      if(lineToRect(initBoxes_str[CMDLINEMAX*i],boundingBox)<0){
          printf("please, specify bounding box for video %s, algorithm %s\n",video,algorithms[i]);
          printf("FYI, initial bounding box in ground truth is %s\n",buf);
          if(gt!=NULL){
              fclose(gt);
          }
          exit(EXIT_FAILURE);
      }else{
          initBoxes[i].x=boundingBox.x;
          initBoxes[i].y=boundingBox.y;
          initBoxes[i].width=boundingBox.width;
          initBoxes[i].height=boundingBox.height;
      }
  }

  VideoCapture cap;
  cap.open( String(video) );
  cap.set( CAP_PROP_POS_FRAMES, start_frame );

  if( !cap.isOpened() ){
    printf("cannot open video %s\n",video);
    help();
  }

  Mat frame;
  namedWindow( "Tracking API", 1 );

  std::vector<Ptr<Tracker> >trackers(algnum);
  for(int i=0;i<algnum;i++){
      trackers[i] = Tracker::create( algorithms[i] );
      if( trackers[i] == NULL ){
        printf("error in the instantiation of the tracker %s\n",algorithms[i]);
        if(gt!=NULL){
            fclose(gt);
        }
        exit(EXIT_FAILURE);
      }
  }

  cap >> frame;
  frame.copyTo( image );
  if(lineToRect(buf,boundingBox)<0){
      if(gt!=NULL){
          fclose(gt);
      }
      exit(EXIT_FAILURE);
  }
  rectangle( image, boundingBox,palette[0], 2, 1 );
  for(int i=0;i<(int)trackers.size();i++){
      rectangle(image,initBoxes[i],palette[i+1], 2, 1 );
      if( !trackers[i]->init( frame, initBoxes[i] ) ){
        printf("could not initialize tracker %s with box %s at video %s\n",algorithms[i],initBoxes_str[i],video);
        if(gt!=NULL){
            fclose(gt);
        }
        exit(EXIT_FAILURE);
      }
  }
  imshow( "Tracking API", image );

  int frameCounter = 0;
  AssessmentRes res((int)trackers.size());

  for ( ;; ){
    if( !paused ){
      cap >> frame;
      if(frame.empty()){
        break;
      }
      frame.copyTo( image );

      if(fgets(buf,sizeof(buf),gt)==NULL){
          printf("ground truth is over\n");
          break;
      }
      if(lineToRect(buf,boundingBox)<0){
          if(gt!=NULL){
              fclose(gt);
          }
          exit(EXIT_FAILURE);
      }
      rectangle( image, boundingBox,palette[0], 2, 1 );
      
      frameCounter++;
      for(int i=0;i<(int)trackers.size();i++){
          bool trackerRes=true;
          clock_t start;start=clock();
          trackerRes=trackers[i]->update( frame, initBoxes[i] );
          start=clock()-start;
          averageMillisPerFrame[i]+=1000.0*start/CLOCKS_PER_SEC;
          if(trackerRes==false){
              initBoxes[i].height=initBoxes[i].width=-1.0;
          }else{
              rectangle( image, initBoxes[i], palette[i+1], 2, 1 );
          }
          for(int j=0;j<(int)res.results[i].size();j++)
              res.results[i][j]->assess(boundingBox,initBoxes[i]);
      }
      imshow( "Tracking API", image );

      if((frameCounter+1)>=ASSESS_TILL){
          break;
      }

      char c = (char) waitKey( 2 );
      if( c == 'q' )
        break;
      if( c == 'p' )
        paused = !paused;
      }
  }
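lineToRect() converts one ground-truth line into a Rect2d and returns a negative value on failure; a sketch, assuming the common "x,y,width,height" text format:

static int lineToRect(char* buf, Rect2d& rect)
{
  double x, y, w, h;
  if (sscanf(buf, "%lf,%lf,%lf,%lf", &x, &y, &w, &h) != 4)
      return -1;                 // malformed line
  rect = Rect2d(x, y, w, h);
  return 0;
}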
コード例 #18
0
ファイル: OpenCV.cpp プロジェクト: jrivera97/COP3503FINAL
void OpenCV::addNewUser(string name, vector<User> users){
    VideoCapture capture;
    Mat frame;
    
    capture.open(-1);
    
    if ( !capture.isOpened() ) {
        printf("--(!)Error opening video capture\n");
    }
    
    int count = 0;
    
    string folder = name;
    string mkdirCropped = "mkdir cropped";
    string folderCreateCommand = "mkdir cropped/" + folder;
    system(mkdirCropped.c_str());
    system(folderCreateCommand.c_str());

    while (capture.read(frame) && (count < 50))
    {
        if( frame.empty() )
        {
            printf(" --(!) No captured frame -- Break!");
            break;
        }
        
        
        //-- 3. Apply the classifier to the frame
        detectNewUser(frame, count, name);
        string window_name = "Capture - Face detection";
        namedWindow(window_name, WINDOW_AUTOSIZE );
        imshow(window_name, frame);
        int c = waitKey(10);
        if( (char)c == 27 ) { break; }
    }
    
    capture.release();
    
    string path = "cropped/";
    
    vector<Mat> images;
    vector<int> nameIndex;
    vector<User>::iterator it;
    int i = 0;
    for(it = users.begin() ; it < users.end(); it++, i++) {
        // found nth element..print and break.
        for (int j = 0; j < 50; j++){
            string file = path + name + "/" + to_string(j) + ".jpg";
            Mat image = imread(file, 0);
            Mat newImage;
            
                cv::resize(image, newImage, Size(200, 200), 0, 0, INTER_LINEAR);
           
            
            
            images.push_back(newImage);
            nameIndex.push_back(i);
        }
    }
    
    
    //trains the new model
    Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
    cout<<"training model..."<<endl;
    model->train(images,nameIndex);
    cout<<"training finished, saving model"<<endl;
    model->save(TRAINEDMODEL);
    cout<<"model saved"<<endl;
    
}
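Once saved, the model can be reloaded for recognition with the same contrib API. A minimal sketch (the probe path is hypothetical; note that FisherFaces needs at least two distinct classes in the training set):

Ptr<FaceRecognizer> recognizer = createFisherFaceRecognizer();
recognizer->load(TRAINEDMODEL);

Mat probe = imread("cropped/alice/0.jpg", 0);     // hypothetical probe image, grayscale
cv::resize(probe, probe, Size(200, 200));         // same size as the training images
int predictedIndex = recognizer->predict(probe);  // index into the users vector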
コード例 #19
0
ファイル: main.cpp プロジェクト: YureVieira/Raspberry
int main()
{
  VideoCapture cap;
  for(int i=0; i<2; i++)
  {
    cap.open(i);
    if (cap.isOpened()) break;
  }

  if (!cap.isOpened())
  {
    cerr<<"Error opening the camera"<<endl;
    return -1;
  }
  cap.set(CV_CAP_PROP_FRAME_HEIGHT,img_height);
  cap.set(CV_CAP_PROP_FRAME_WIDTH,img_width);
  //    img_height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
  //    img_width = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
  center_screen = Point(cap.get(CV_CAP_PROP_FRAME_WIDTH)/2,cap.get(CV_CAP_PROP_FRAME_HEIGHT)/2);
  color[0] = 103;         ///Blue
  //    cap.set(CV_CAP_PROP_GAIN, 48);
  //    cap.set(CV_CAP_PROP_BRIGHTNESS, 10);

  namedWindow("window",1);
  namedWindow("result",1);
  createTrackbar("H","window", &FAIXA_H, 90);
  createTrackbar("S","window", &FAIXA_S, 126);
  createTrackbar("V","window", &FAIXA_V, 126);
  createTrackbar("Limiar","result", &limiar, 255);
  createTrackbar("cut","result", &cut_vertical, cap.get(CV_CAP_PROP_FRAME_HEIGHT)/2-1);
  setMouseCallback("window", CallBackFunc, NULL);


  Mat frame0,pre_img,result,aux;

  vector<Mat> HSV_chanells;

  vector<vector<Point> > contours;
  vector<Point> approx;

  for(;;)
  {
    cap >> frame0;
    //        GaussianBlur(frame, frame, Size(5, 5), 2, 1 );//apply a filter

    //Crop the image
    Mat frame = Mat(frame0, Rect(0,//cropped image
    cut_vertical,
    cap.get(CV_CAP_PROP_FRAME_WIDTH),
    cap.get(CV_CAP_PROP_FRAME_HEIGHT) - cut_vertical*2));

///    imshow("subframe",frame);
    if(flag_color)
    {   //Color images (color-based segmentation)
      medianBlur(frame,frame,5);
      cvtColor(frame,pre_img,CV_BGR2HSV);
      pre_img.copyTo(image_HSV);
    }
    else
    {   //Grayscale (simple segmentation, e.g. a whiteboard)
      GaussianBlur(frame, frame, cv::Size(5, 5), 2, 2 );  //apply a filter
      cvtColor(frame,pre_img,CV_BGR2GRAY);               //Convert to grayscale and store in pre_img.
      threshold(pre_img,                                 //Source image.
      pre_img,                                 //Destination image.
      limiar,                                      //Threshold value.
      255,                                      //Max value.
      CV_THRESH_BINARY);                        //Threshold type.

      pre_img.copyTo(aux);

      Canny(pre_img, result, v1, v2);                 //Edge detection.

      findContours(result,
      contours,
      CV_RETR_LIST,
      CV_CHAIN_APPROX_SIMPLE);
    }


    /********************************************************************************************************************************/
#ifdef ERODE
    ///Erosion
    int erosion_size = 3;
    Mat element = getStructuringElement( MORPH_ELLIPSE,
    Size( 2*erosion_size + 1, 2*erosion_size+1 ),
    Point( erosion_size, erosion_size ) );
    /// Apply the erosion operation
    //erode( result, result, element );
    //        erode( frame, frame, element );
    dilate( frame, frame, element );
#endif
    /********************************************************************************************************************************/
    if(flag_color)
    {
      inRange(pre_img,Scalar(color[0]-FAIXA_H,color[1]-FAIXA_S,color[2]-FAIXA_V),Scalar(color[0]+FAIXA_H,color[1]+FAIXA_S,color[2]+FAIXA_V),result);
      result.copyTo(aux);
      findContours(result,contours,CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);
    }
    /************************************************************
     * Blob segmentation
     ***********************************************************/
    int blob_index=0;
    int blob_area=0;
    int area_min;
    if(flag_color)area_min = 100;
    else area_min = 10;
    /***********************************************************
     * Select the largest blob for segmentation
     ***********************************************************/
    for( size_t i = 0; i < contours.size() ; i++ )
    {
      double area = fabs(contourArea(Mat(contours[i])));
      if(area >= area_min && area > blob_area /*&& area < 10000*/)
      {
        blob_area = (int)area;
        blob_index = (int)i;
      }
    }

    if(blob_area >= area_min)
    {
      //cout << "Área: "<< blob_area <<endl;
      approxPolyDP(contours[blob_index], approx, arcLength(Mat(contours[blob_index]), true)*0.02, true);
      Rect boundRect = boundingRect( Mat(approx) );
      rectangle( frame, boundRect.tl(), boundRect.br(), Scalar(126,200,100), 2);
      /************************************************************
       * Extract the blob center
       ************************************************************/
      //Point2f center;                                     ///Variable that receives the blob center.
      float radius;                                           ///Auxiliary variable.
      minEnclosingCircle(approx,center,radius);           ///Find the blob center.
//      transform_XY.transform_coor((k_blob/blob_area),center);
        transform_XY.transform_coor((img_height - center.y),center);
        transform_XY.show_result();

      //circle( result, center, (int)radius, 255/*cv::Scalar(255,200,100)*/, 1);
      //        circle( frame, center, (int)radius, cv::Scalar(255,0,0), 2);      ///Circle marking the blob.
      //        show_dist((float)blob_area,konst,center.x);
      //        cout <<"center:("<< (int)center.x<<", "<<(int)center.y<<") / Area="<<blob_area<< " / Radius="<<radius<<endl;
    }

    //        circle(frame,center_screen,5,Scalar(10,255,50),3,2);        ///Circle marking the screen center.
    circle(frame,Point(_x,_y),1,Scalar(100,255,50),3,2);        ///Circle marking the clicked point.

    //        if(center.x >center_screen.x)line(frame,center_screen,center,Scalar(0,255,0),2);
    //        else line(frame,center_screen,center,Scalar(0,0,255),2);

    Rect rect_aux = Rect(0,
    cut_vertical,
    cap.get(CV_CAP_PROP_FRAME_WIDTH),
    cap.get(CV_CAP_PROP_FRAME_HEIGHT) - cut_vertical*2);

    rectangle( frame0, rect_aux.tl(), rect_aux.br(), Scalar(0,0,255), 1);

    imshow("result",aux);
    imshow( "window", frame0 );

    int key = waitKey(10);
    if(key==(int)'c')flag_color = !flag_color;
    if(key==(int)'q')break;     ///Quit with the 'q' key

  }
  destroyAllWindows();
  return 0;
}
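CallBackFunc() is registered on the "window" view but defined elsewhere; it presumably stores the clicked point and samples the HSV color there as the segmentation target. A sketch using the globals from this example:

void CallBackFunc(int event, int x, int y, int, void*)
{
  if (event == CV_EVENT_LBUTTONDOWN)
  {
    _x = x;
    _y = y;
    Vec3b hsv = image_HSV.at<Vec3b>(y, x);  // note the (row, col) access order
    color[0] = hsv[0];   // H
    color[1] = hsv[1];   // S
    color[2] = hsv[2];   // V
  }
}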
コード例 #20
0
int main (int argc, const char * argv[])
{
	printf("Hello\n");
	cout << "Hello Artyom" << endl << flush ;
    VideoCapture cap;
    cap.open("http://192.168.1.105:8080/?action=stream");
    /*
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
	*/
    if (!cap.isOpened())
        return -1;

    Mat img;


/*    VideoCapture cap1(1);

    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

    if (!cap1.isOpened())
        return -1;

    Mat img2; */

    string Pos = "";
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    string posPoint = "";
    string posRect ="";
    while (true)
    {
        cap >> img;
      //  cap1 >> img2;

        if (!img.data)
            continue;

        vector<Rect> found, found_filtered;
        vector<Point> found1, found_filtered1;
        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        hog.detect(img, found1, 0, Size(8,8), Size(0,0));

        size_t i, j;



        for (i = 0 ; i < found1.size(); i++){

        	Point tempPoint = found1[i];

    	    Rect r ;
    	    if (tempPoint.x > 0 && tempPoint.y > 0) {
    	    r.x += tempPoint.x;
            r.y += tempPoint.y;

    	    r.width = 10;
    	    r.height = 10;
        	rectangle(img, r.tl(), r.br(), cv::Scalar(255,0,0), 2);
    	    string x = to_string(r.x);
    	    string y = to_string(r.y);
    	    posPoint = "Pos: x:" + x+ " y: " + y;
    	    }


        }

        for (i=0; i<found.size(); i++)
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                    break;
            if (j==found.size())
                found_filtered.push_back(r);
        }
        for (i=0; i<found_filtered.size(); i++)
        {
	    Rect r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
	    r.width = cvRound(r.width*0.8);
	    r.y += cvRound(r.height*0.06);
	    r.height = cvRound(r.height*0.9);

//	    string x = to_string(r.x);
	    string y = to_string(r.y);
	  //  posRect = "Pos: x:" + x+ " y: " + y;

	    rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 2);
        }


        int number  = 5;
        char text[255];
        sprintf(text, "Score %d", (int)number);

        // Draw the overlay strings with the C++ API; the original legacy
        // CvFont/cvPutText code leaked an IplImage wrapper and two char
        // buffers on every frame
        putText(img, posRect, Point(200,400), FONT_HERSHEY_SIMPLEX | FONT_ITALIC, 1.0, Scalar(0,255,0));
        putText(img, posPoint, Point(200,430), FONT_HERSHEY_SIMPLEX | FONT_ITALIC, 1.0, Scalar(255,255,255));

        imshow("video capture", img);
    //    imshow("video capture2", img2);

        if (waitKey(1) >= 0)
            break;

    }

    //namedWindow("video capture", CV_WINDOW_AUTOSIZE);

    return 0;
}
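The nested containment loop above keeps only detections that are not fully inside another detection; OpenCV's groupRectangles() performs a similar duplicate suppression in a single call. A sketch, applied to the found vector from the loop:

vector<Rect> boxes = found;       // copy, since groupRectangles() modifies in place
groupRectangles(boxes, 1, 0.2);   // drop boxes without at least one similar neighbour, merge the rest
// 'boxes' now plays the role of found_filtered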
コード例 #21
0
///////////////////////////////////////////////////////////////////////////////////////////////////
//                                                                                               //
//                                            Main                                               //
//                                                                                               //
///////////////////////////////////////////////////////////////////////////////////////////////////
int main()
{   
    // start system timer
    startTime = microSecond();
    
    // Get current data and time
    time_t rawTime;
    time(&rawTime);
    string numTime;
    numTime = getNumTime(&rawTime);
    printf ( "The current local time is: %s \n", ctime(&rawTime) );
    
    // Initialize AHRS
    char* gx3_dev_name;
    // char a='\0';
    // gx3_dev_name = &a;
    gx3_dev_name = scandev("microstrain");
    if(strcmp(gx3_dev_name,"")==0){
        printf("Failed to find attached microstrain device.\n");
        return -1;
    }

    printf("Initializing AHRS.....\n");
    while (mip_interface_init(gx3_dev_name, 115200, &ahrs_gx3_25, DEFAULT_PACKET_TIMEOUT_MS) != MIP_INTERFACE_OK) {
        printf("Failed to initialize AHRS, Retrying..... \n");
        return -1;
    } 
    printf("AHRS port: open\n\n");

    // printf("Resetting AHRS.....\n");
    // while(mip_base_cmd_reset_device(&ahrs_gx3_25) != MIP_INTERFACE_OK){
    //     printf("Failed, Retrying.....\n");
    // } // resetting AHRS device
    // printf("AHRS: reset\n");

    printf("Idling AHRS.....\n");
    while(mip_base_cmd_idle(&ahrs_gx3_25) != MIP_INTERFACE_OK){
        printf("Failed, Retrying.....\n");
    } // idling AHRS device
    printf("AHRS: idle\n\n");

    // Set callback
    if(mip_interface_add_descriptor_set_callback(&ahrs_gx3_25, MIP_AHRS_DATA_SET, NULL, &ahrs_packet_callback) != MIP_INTERFACE_OK) {
        printf("Failed to register callback\n");
        return -1;
    } else {
        printf("Registered callback function succesfully\n\n");
    }

    // Setup AHRS
    u8 enable = 1;
    u16 ahrs_rate = 0;
    while(mip_3dm_cmd_get_ahrs_base_rate(&ahrs_gx3_25, &ahrs_rate) != MIP_INTERFACE_OK){
        printf("Failed!! Retrying.....\n");
    }
    printf("Rate of AHRS message is: %d\n\n", ahrs_rate);

    // Setup the AHRS message format and verify via read-back
    u8  data_stream_format_descriptors[10] = {0};
    u16 data_stream_format_decimation[10]  = {0};
    u8  data_stream_format_num_entries     =  0;

    data_stream_format_descriptors[0] = MIP_AHRS_DATA_ACCEL_SCALED; 
    data_stream_format_descriptors[1] = MIP_AHRS_DATA_GYRO_SCALED; 
    data_stream_format_descriptors[2] = MIP_AHRS_DATA_EULER_ANGLES;

    data_stream_format_decimation[0]  = 0x02; 
    data_stream_format_decimation[1]  = 0x02; 
    data_stream_format_decimation[2]  = 0x02; 

    data_stream_format_num_entries = 3;
 
    //Set the message format
    printf("Setting the AHRS datastream format....");
    while(mip_3dm_cmd_ahrs_message_format(&ahrs_gx3_25, MIP_FUNCTION_SELECTOR_WRITE, &data_stream_format_num_entries, 
                                       data_stream_format_descriptors, data_stream_format_decimation) != MIP_INTERFACE_OK) {
        printf("Failed!! Retrying.....\n");
    }
    printf("AHRS datastream format: set\n\n");

    printf("Enabling continuous data stream.....\n");
    while(mip_3dm_cmd_continuous_data_stream(&ahrs_gx3_25, MIP_FUNCTION_SELECTOR_WRITE, MIP_3DM_AHRS_DATASTREAM, &enable) != MIP_INTERFACE_OK){
        printf("Failed!! Retrying.....\n");
    }
    printf("Continuous data stream : enabled\n\n");



    /***************************** APM Initialize ************************************************/
    char* apm_dev_name;
    // char a='\0';
    // apm_dev_name = &a;
    apm_dev_name = scandev("arduino_mega");
    if(strcmp(apm_dev_name,"")==0){
        printf("Failed to find attached APM device.\n");
        return -1;
    }
    apm_link.beginPort(apm_dev_name,115200);


    /***************************** Video Initialize **********************************************/
#if DISPLAY == ENABLED
    namedWindow(window1,CV_WINDOW_AUTOSIZE);
#endif
    
    // Check capture device status
    videoSrc0.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
    videoSrc0.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
    if (!videoSrc0.isOpened()) {
        printf("capture device failed to open!\n");
        return -1;
    } else {
        printf("capture device : open\n\n\n");
    }

    // create dir and files
    mainPath += numTime;
    cout<<"saving files to "<<mainPath<<endl;
    string accPath, gyroPath, framePath, imgPath, attPath, altPath;
    imgPath   = mainPath+"/img";
    accPath   = mainPath+"/acc";
    gyroPath  = mainPath+"/gyro";
    attPath   = mainPath+"/attitude";
    framePath = mainPath+"/framedata";
    altPath   = mainPath+"/alt";
    string dir_cmd1 = "mkdir "+ mainPath;
    string dir_cmd2 = "mkdir "+ imgPath;
    // creating paths
    system(dir_cmd1.c_str());
    system(dir_cmd2.c_str());
    // opening files
    logACC   = fopen(accPath.c_str(), "w");
    logGYRO  = fopen(gyroPath.c_str(), "w");
    logFrame = fopen(framePath.c_str(),"w");
    logATT   = fopen(attPath.c_str(),"w");
    logALT   = fopen(altPath.c_str(),"w");
    fprintf(logACC,  "     time(us)    xacc(g)    yacc(g)    zacc(g)\n");
    fprintf(logGYRO, "     time(us)    xgyro      ygyro      zgyro\n");
    fprintf(logATT,  "     time(us)    roll(rad)   pitch(rad)     yaw(rad)\n");
    fprintf(logALT,  "     time(us)  altitude(m)\n");
    // Mainloop
    pthread_t thread1, thread2, thread3, thread4;

    pthread_create( &thread1, NULL, runThread1, NULL);
    pthread_create( &thread2, NULL, runThread2, NULL);
    pthread_create( &thread3, NULL, runThread3, NULL);
    pthread_create( &thread4, NULL, runThread4, NULL);
    pthread_join( thread1, NULL );
    pthread_join( thread2, NULL );
    pthread_join( thread3, NULL );
    pthread_join( thread4, NULL );

    return 0;
}
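runThread1..4 are the worker routines (camera capture, AHRS polling, APM telemetry, logging) declared elsewhere in this project. A skeleton of what one such pthread worker presumably looks like, with the loop body assumed:

static void* runThread1(void*)
{
    Mat frame;
    while (videoSrc0.read(frame))              // videoSrc0 is the global capture used above
    {
        double t = microSecond() - startTime;  // timestamp relative to program start
        (void)t;
        // ... append the timestamp to logFrame and save the frame under imgPath ...
    }
    return NULL;
}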
コード例 #22
0
ファイル: main.cpp プロジェクト: Riseley/Drone
int main(int argc, char** argv)
{
   Options o;
   parse_command_line(argc, argv, o);

   bool use_camera;
   VideoCapture cap;
   VideoWriter writer;

   // Use filename if given, else use default camera
   if( !o.infile.empty() )
   {
      cap.open(o.infile);
      use_camera = false;
   }
   else
   {
      cap.open(0);
      use_camera = true;
   }

   if( !cap.isOpened() )
   {
      cerr << "Failed to open capture device" << endl;
      exit(2);
   }

   if( !o.outfile.empty() )
   {
      int fps = cap.get(CV_CAP_PROP_FPS);
      int width = cap.get(CV_CAP_PROP_FRAME_WIDTH);
      int height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
      writer.open(o.outfile, CV_FOURCC('j', 'p', 'e', 'g'), fps, Size(width, height));
      if( !writer.isOpened() )
      {
	 cerr << "Could not open '" << o.outfile << "'" << endl;
	 exit(1);
      }
      use_camera = false;
   }

   // Open window and start capture
   namedWindow(WINDOW, CV_WINDOW_FREERATIO | CV_GUI_NORMAL);


   StateData d(o.num_particles, o.use_lbp);
   State state = state_start;
   Mat frame, gray;

   lbp_init();

   // Main loop

   for(;;)
   {

      // Start timing the loop
      

      // Capture frame
      if( !d.paused)
      {
	 cap >> frame;
	 if(frame.empty())
	 {
	    cerr << "Error reading frame" << endl;
	    break;
	 }
      }
      if( use_camera )
      {
	 flip(frame, d.image, 1);
      }
      else
      {
	 frame.copyTo(d.image);
      }
      
      // Set up all the image formats we'll need
      if(d.use_lbp)
      {
	 cvtColor(d.image, gray, CV_BGR2GRAY);
	 lbp_from_gray(gray, d.lbp);
      }
      else
      {
	 if( d.lbp.empty() )
	    d.lbp = Mat::zeros(d.image.rows, d.image.cols, CV_8UC1);
      }

      // Handle keyboard input
      char c = (char)waitKey(10);
      if( c == 27 )
	 break;
      switch(c)
      {
	 case 'p':
	    d.paused = !d.paused;
	    break;

	 case 'c':
	    cout << "Tracking cancelled." << endl;
	    state = state_start;
	    break;

	 case 'd':
	    d.draw_particles = !d.draw_particles;
	    cout << "Draw particles: " << d.draw_particles << endl;
	    break;
      }

      // Process frame in current state
      state = state(d);


      // Elapsed time in seconds
/*
      timeval end_time;
      gettimeofday(&end_time, 0);
      float dt = (float)(end_time.tv_sec - start_time.tv_sec) + ((float)(end_time.tv_usec - start_time.tv_usec)) * 1E-6;       
      cout << "Frame rate: " << 1.f / dt << endl;
*/
      Mat target_display_area(d.image, Rect(d.image.cols - d.selection.width, 0, d.selection.width, d.selection.height));
      d.target.copyTo(target_display_area);
  

      imshow(WINDOW, d.image);
   }

   return 0;
}
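State is callable and returns the next State, so the tracker's main loop is a function-pointer state machine. One way to declare such a type (names assumed, not necessarily the project's actual header):

struct StateData;                        // holds image, target, particles, ... as used above
struct State;
typedef State (*StateFn)(StateData&);    // each state maps the data to the next state

struct State
{
   StateFn fn;
   State(StateFn f) : fn(f) {}
   State operator()(StateData& d) { return fn(d); }  // run current state, return the next
};

State state_start(StateData& d);         // e.g. waits for the user to select a target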
コード例 #23
0
ファイル: main.cpp プロジェクト: fehlfarbe/lutherprototype
int main( int argc, const char* argv[] )
{

    VideoCapture *cap;
    if( argc > 1){
        cout << "Open file " << argv[1] << endl;
        cap = new VideoCapture(argv[1]);
    }
    else{
        cout << "Open Videodevice 0" << endl;
        cap = new VideoCapture(0);
    }


    if( !cap->isOpened() ){
        cout << "Can't open videodevice" << endl;
        return -1;
    }

    int count = 0;
    Mat frame;
    time_t start, end;
    cap->read(frame);

    // Setup detector
    Facedetector detector = Facedetector();
    // activate / deactivate background subtraction
    detector.bgSubtraction = bgSub;


    // load cascade files for frontal and profile face detection
    if(!detector.loadFrontCascade("../lbpcascade_frontalface.xml")){
        cout << "Can't load cascade file";
        return -1;
    }
    if(!detector.loadProfileCascade("../lbpcascade_profileface.xml")){
        cout << "Can't load cascade file";
        return -1;
    }

    //Setup output window
    namedWindow("Output");
    setMouseCallback("Output", selectROI);

    //Start endless loop
    time(&start);
    while(!frame.empty()){
        count++;

        //face detection
        output = detector.detect(frame);
        vector<Face> faces = detector.getFaces();
        //output = frame;
        if( drawRoi ){
            drawROISelection();
        } else if(roi.area() > 0 ){
            detector.roiBottom = output.rows - (roi.y + roi.height);
            detector.roiTop = roi.y;
        }

        //Output window
        time(&end);
        double fps = double(count) / difftime(end, start);
        ostringstream stream;
        stream << facesh.size() << " Faces detected (" << fps << " fps)";
        putText(output, stream.str(), Point(5, output.rows-10), 1, 1, Scalar(255, 255, 255));
        imshow("Output", output);
        if( (waitKey(10) & 255) == 'c' ){
            cout << "Abort..." << endl;
            break;
        }

        //write images
        if(writeIM){
            ostringstream filename;
            time_t t = time(0);
            filename << writeDst << "/" << t << "_" << count << ".jpg";
            imwrite(filename.str(), output);
        }

        //cout << faces.size() << " faces ";
        //cout << "(" << 1.0 / ((float(clock()-t)/CLOCKS_PER_SEC)) << "fps)" << endl;

        //read next frame
        cap->read(frame);
    }

    //cleanup
    cap->release();
    delete cap;
    cout << "End.." << endl;

    return 0;
}
コード例 #24
0
/*
 * To work with Kinect or XtionPRO the user must install OpenNI library and PrimeSensorModule for OpenNI and
 * configure OpenCV with WITH_OPENNI flag is ON (using CMake).
 */
int main( int argc, char* argv[] )
{   
    time_t start = time(0);
    // parseCommandLine() is commented out below, so give its outputs sane defaults
    bool isColorizeDisp = false, isFixedMaxDisp = false;
    int imageMode = 0;
    bool retrievedImageFlags[5] = {true, true, true, true, true};
    string filename;
    bool isVideoReading = false;
    //parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );

    if (pcl::io::loadPCDFile<pcl::PointXYZ> ("test_pcd.pcd", *cloud_golden) == -1) //* load the file
    {
    	PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
    	return (-1);
    }
    std::cout << "Loaded "
            << cloud_golden->width * cloud_golden->height
            << " data points from test_pcd.pcd with the following fields: "
            << std::endl;

    pcl::copyPointCloud (*cloud_golden, *cloud_transformed);
    cout << "Device opening ..." << endl;
    cout << CV_CAP_OPENNI <<endl;
    VideoCapture capture;
    if( isVideoReading )
        capture.open( filename );
    else
        capture.open(CV_CAP_OPENNI);

    cout << "done." << endl;

    if( !capture.isOpened() )
    {
        cout << "Can not open a capture object." << endl;
        return -1;
    }

    if( !isVideoReading )
    {
        bool modeRes=false;
        switch ( imageMode )
        {
            case 0:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
                break;
            case 1:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
                break;
            case 2:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
                break;
                //The following modes are only supported by the Xtion Pro Live
            case 3:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
                break;
            case 4:
                modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
                break;
            default:
                CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
        }
        if (!modeRes)
            cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
    }
    if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0) capture.set(CV_CAP_PROP_OPENNI_REGISTRATION,1);
    // Print some available device settings.
    cout << "\nDepth generator output mode:" << endl <<
            "FRAME_WIDTH      " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT     " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FRAME_MAX_DEPTH  " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
            "FPS              " << capture.get( CV_CAP_PROP_FPS ) << endl <<
            "REGISTRATION     " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
    if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
    {
        cout <<
            "\nImage generator output mode:" << endl <<
            "FRAME_WIDTH   " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
            "FRAME_HEIGHT  " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
            "FPS           " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
    }
    else
    {
        cout << "\nDevice doesn't contain image generator." << endl;
        if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
            return 0;
    }
    if( !face_cascade.load( cascade_name[0] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    if( !eyes_cascade.load( cascade_name[1] ) )
    { 
	printf("--(!)Error loading\n"); return -1; 
    };
    //printf("Entering for\n");

    int last_printed = 0;
    int WAIT_SEC = 10;

    viewer = simpleVis(cloud_transformed);
    for(;;)
    {
        Mat depthMap;
        Point image_center;
        Mat Display_image;
        Mat validDepthMap;
        Mat disparityMap;
        Mat bgrImage;
        Mat grayImage;
        Mat show;
        double seconds_since_start = difftime( time(0), start);

        if( !capture.grab() )
        {
            cout << "Can not grab images." << endl;
            return -1;
        }
        else
        {
            if( capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) )
            {
                const float scaleFactor = 0.05f;
                depthMap.convertTo( show, CV_8UC1, scaleFactor );
                //imshow( "depth map", show );
            }

            if( capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) {
                
            // Align nose with the circle


                int rad = 40;
                int row_rgb = bgrImage.rows;
                int col_rgb = bgrImage.cols;
                image_center.y = row_rgb/2 - 100;
                image_center.x = col_rgb/2;
                Display_image = bgrImage.clone();
                // Copying bgrImage so that circle is shown temporarily only
                circle( Display_image, image_center, rad, Scalar( 255, 0, 0 ), 3, 8, 0 );
                imshow( "rgb image", Display_image );

                // Wait for a key Press
                //std::cin.ignore();
                // Now it will capture Golden data 
            }

        /*    if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) )
                imshow( "gray image", grayImage );*/

            int seconds = int(seconds_since_start);
            if(last_printed<seconds && seconds<=WAIT_SEC){
                printf(" Capturing golden face template in %d seconds ...\n\n", WAIT_SEC - seconds);
                last_printed=seconds;
            }
            
	    if(!depthMap.empty() && !bgrImage.empty() && (seconds_since_start > WAIT_SEC)) 
		    detectAndDisplay(bgrImage, depthMap, argc, argv);
	    
	    //writeMatToFile("depth.txt",depthMap);
        }

        viewer->spinOnce (10);
        boost::this_thread::sleep (boost::posix_time::microseconds (10));
        viewer->removePointCloud("sample cloud");
        viewer->addPointCloud<pcl::PointXYZ> (cloud_transformed, "sample cloud");
        viewer->setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "sample cloud");

        if( waitKey( 30 ) >= 0 )
            break;
    }
    Trans_dump.close();
    return 0;
}
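Note on the depth preview above: scaleFactor = 0.05f simply compresses the sensor's millimetre readings into 8 bits for display (0.05 ≈ 255/5000). A minimal self-contained sketch of the same conversion, assuming a 5000 mm device ceiling (an assumption, not a value read from the camera):

// Hedged sketch: convert a 16-bit OpenNI depth map (millimetres) to an
// 8-bit preview. maxDepthMm is an assumed device limit, not queried hardware.
#include <opencv2/opencv.hpp>

cv::Mat depthToPreview(const cv::Mat& depthMm, double maxDepthMm = 5000.0)
{
    CV_Assert(depthMm.type() == CV_16UC1);
    cv::Mat preview;
    depthMm.convertTo(preview, CV_8UC1, 255.0 / maxDepthMm); // linear rescale
    return preview;
}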
コード例 #25
0
int main(){
    
    bool debugMode = false;
    bool trackingEnabled = false;
    bool pause = false;
    
    Mat Image, debugImage;
    Mat currentMat,prevMat;
    Mat currentDiff,prevDiff;
    Mat thresholdImage;
    
    VideoCapture capture;
    
    capture.open(0);
    
    if(!capture.isOpened()){
        cout<<"ERROR ACQUIRING VIDEO FEED\n";
        getchar();
        return -1;
    }
    
    while(1){

        capture.read(Image);
        Image.copyTo(currentMat);
        cv::cvtColor(currentMat,currentMat, COLOR_BGR2GRAY);
        
        if (prevMat.empty())
        {
            currentMat.copyTo(prevMat);
            continue;
        }
        
        cv::absdiff(currentMat, prevMat, currentDiff);
        
        //threshold intensity image at a given sensitivity
        cv::threshold(currentDiff,currentDiff,SENSITIVITY_VALUE,255,THRESH_BINARY);
        
        //blur the image to get rid of the noise.
        cv::blur(currentDiff,currentDiff,cv::Size(BLUR_SIZE,BLUR_SIZE));

        //threshold again to obtain binary image from blur output
        cv::threshold(currentDiff,currentDiff,SENSITIVITY_VALUE,255,THRESH_BINARY);

        if (prevDiff.empty())
        {
            currentDiff.copyTo(prevDiff);
        }

        cv::bitwise_and(currentDiff, prevDiff, thresholdImage);
        if(debugMode == true){
            //show the threshold image after it's been "blurred"
            flip(thresholdImage, debugImage, 1);
            imshow("Final Threshold Image",debugImage);
            
        }
        else {
            //if not in debug mode, destroy the windows so we don't see them anymore
            cv::destroyWindow("Final Threshold Image");
        }
        
        //if tracking enabled, search for contours in our thresholded image
        if(trackingEnabled){
            searchForMovement(thresholdImage,Image);
        }
        
        flip(Image, Image, 1);
        //show our captured frame
        imshow("Frame1",Image);
        
        currentMat.copyTo(prevMat);
        currentDiff.copyTo(prevDiff);
        
        switch(waitKey(10)){
                
            case 27: //'esc' key has been pressed, exit program.
                capture.release();
                return 0;
            case 116: //'t' has been pressed. this will toggle tracking
                trackingEnabled = !trackingEnabled;
                if(trackingEnabled == false) cout<<"Tracking disabled."<<endl;
                else cout<<"Tracking enabled."<<endl;
                break;
            case 100: //'d' has been pressed. this will toggle debug mode
                debugMode = !debugMode;
                if(debugMode == false) cout<<"Debug mode disabled."<<endl;
                else cout<<"Debug mode enabled."<<endl;
                break;
            case 112: //'p' has been pressed. this will pause/resume the code.
                pause = !pause;
                if(pause == true){
                    cout<<"Code paused, press 'p' again to resume"<<endl;
                    while (pause == true){
                        //stay in this loop until 'p' is pressed again
                        if (!currentMat.empty())
                        {
                            currentMat.release();
                            prevMat.release();
                            currentDiff.release();
                            prevDiff.release();
                        }
                        switch (waitKey()){
                                //a switch statement inside a switch statement? Mindblown.
                            case 112:
                                //change pause back to false
                                pause = false;
                                cout<<"Code Resumed"<<endl;
                                break;
                        }
                    }
                }
        }
        //capture.release();
    }
    return 0;
    
}
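The loop above is a double (three-frame) differencing detector: two consecutive absolute differences are thresholded, blurred, re-thresholded, and AND-ed, so motion survives only where both differences agree, suppressing the ghost a single difference leaves at the old position. A compact sketch of the core idea, assuming grayscale frames and an illustrative threshold standing in for the project's SENSITIVITY_VALUE:

// Hedged sketch of three-frame differencing; thresh stands in for the
// project's SENSITIVITY_VALUE, and frames are assumed single-channel.
#include <opencv2/opencv.hpp>

cv::Mat tripleFrameDiff(const cv::Mat& f1, const cv::Mat& f2,
                        const cv::Mat& f3, double thresh = 20)
{
    cv::Mat d1, d2, motion;
    cv::absdiff(f2, f1, d1);
    cv::absdiff(f3, f2, d2);
    cv::threshold(d1, d1, thresh, 255, cv::THRESH_BINARY);
    cv::threshold(d2, d2, thresh, 255, cv::THRESH_BINARY);
    cv::bitwise_and(d1, d2, motion); // keep only motion both diffs agree on
    return motion;
}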
コード例 #26
0
int main(int argc, char** argv)
{
	glutInit(&argc, argv);
	glutInitDisplayMode(GLUT_DOUBLE|GLUT_RGB|GLUT_DEPTH);
	glutInitWindowSize(400,400);

	Mat image;
	Mat Rot, Tran;
	int numcorners;
	
	Mat icovar;
	Scalar meanmat;
	double covar[3][3]={{35.2189, 146.3495, 105.9640},{146.3495,801.1402,527.6974},{105.9640,527.6974,553.3654}};
	meanmat[0]=15.5662;
	meanmat[1]=118.3597;
	meanmat[2]=48.5153;

	Mat covmat(3,3,CV_64F,covar);
	
	Mat mask = Mat::zeros(camlen, camwidth, CV_8UC1);	//create a matrix the same size as the image (480 by 640 from the webcam capture)
	icovar=inversemat(covmat);		//inverse of the colour covariance matrix
	
	float distance = 250;
	int elemsize=3;
	
	Mat element = getStructuringElement(0, Size( 2*elemsize + 1, 2*elemsize+1 ), Point( elemsize, elemsize ) );
	
	Mat corners;
	cout<<"3D Image Reconstruction Program v1.0"<<endl;
	cout<<"-----------------------------------------------------------"<<endl;
	cout<<"Produced by: Seifullaah Sherrief"<<endl;
	cout<<"\nWelcome to the 3D Image reconstruction program, which will convert an object"<<endl;
	cout<<"into a 3D model. You will require the calibration chart with the four marked "<<endl;
	cout<<"circle corners, an object and a green coloured background to place the chart "<<endl;
	cout<<"and object in front. please refer to the User manual for operational\ninstructions."<<endl;
	cout<<"Press 'r' when ready to render and press 'esc' for the final 3D model"<<endl;
	cout<<"-------------------------------------------------------------------------"<<endl;
		
	vector<vector<Point3f>> object_points;
    vector<vector<Point2f>> image_points;

	vector<Point3f> obj;
	vector<Point2f> img;
    
	vector<Point3f> threedpoint;
	vector<Point2f> projectedpoints;
	
	Mat intrinsic = Mat(3, 3, CV_32FC1);
	Mat distCoeffs;
	vector<Mat> rvecs;
	vector<Mat> tvecs;
	
	intrinsic.ptr<float>(0)[0] = 1;
	intrinsic.ptr<float>(1)[1] = 1;
	Mat silhouette;
	int objtemp=0;
	VideoCapture webcam;
	webcam.open(-1);	
	
	bool render=false;
	int tempry=0;

	//distance for note purpose
	//rectangle horizontally dot to dot 2620 vertically 1750mm
	//square horizontally dot to do 1733 vertically 1750mm
	
	//int sz[] = {lenx,leny,lenz};
	//Mat threedimension(3,sz,CV_32F,Scalar::all(1.0));  //create 3dim matrix, type 32 filled with 1s.
	double threedimension[30][30][30];
    
	for(int i=0; i<30; i++)
	{
		for(int j=0; j<30; j++)
		{
			for(int k=0;k<30;k++)
			{
				threedimension[i][j][k]=1.0;
			}
		}
	}

	cout<<"Enter number of corners to detect (must be greater than 4) e.g 5: "<<endl;
    cin>>numcorners;
	
	if(!webcam.isOpened())
	{
		cout<<"\nThe Camera is being used by another application, make sure all applications using the camera are closed and try running this program again."<<endl;
		system("PAUSE");
		return 0;
	}

	obj.push_back(Point3f(0,0,0));
	obj.push_back(Point3f(30.0,0,0));
	obj.push_back(Point3f(0,30.0,0));
	obj.push_back(Point3f(30.0,30.0,0));
	
	glutCreateWindow("Temporary Visual of 3D Model");
	initRendering();
	while(1)
	{
		//copy webcam stream to image
		webcam>>image;
		glutKeyboardFunc(handleKeypress);
		glutReshapeFunc(handleResize);
		int key=waitKey(1);
		if(key=='r'){render=true;}
		#pragma omp parallel sections
		{
			#pragma omp section
			{
				silhouette=imagesegmentation(image,icovar,meanmat,distance,mask,element); 	
			}
			#pragma omp section
			{
				corners=Cornerdetect(image,corners,numcorners);
			}			
		}

		if(corners.rows>0)
		{
			for(int i=0;i<4;i++)
			{
					//draw a circle on the image: centre point, radius, colour, thickness, line type, shift
					circle(image,corners.at<Point2f>(i),3,CV_RGB(255,0,0),1,8,0);
					//obj.push_back(Point3f(float(objtemp/2), float(objtemp%2), 0.0f));		//setting up the units of calibration
					img.push_back(corners.at<Point2f>(i));		
					objtemp++;
			}
			if(objtemp==4)
			{
				image_points.push_back(img);
				object_points.push_back(obj);
				calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs); 
				Rot=rvecs[0];
				Tran=tvecs[0];
								
				if(render)
				{
					for(float l=0.0;l<30.0;l++)
					{
						for(float w=0.0;w<30.0;w++)
						{
							for(float h=0.0;h<30.0;h++)
							{
								threedpoint.push_back(Point3f(l,w,h));		
							}
						}
					}
				
					projectPoints(threedpoint,Rot,Tran,intrinsic,distCoeffs,projectedpoints);

					// Allocate the vector
					vertexpoints.resize(projectedpoints.size(),Point3f(0,0,0));

					#pragma omp parallel for
					for(int index=0;index<(int)projectedpoints.size();index++)
					{
						int dx = threedpoint[index].x, dy=threedpoint[index].y,dz=threedpoint[index].z;
						double check = threedimension[dx][dy][dz];
						if(check==1)
						{
							vertexpoints[index] = Point3f(dx,dy,dz);
							if(float(mask.at<uchar>(projectedpoints[index]))==255.0)
							{
								threedimension[dx][dy][dz]=0.0;
							}							
						}
					}
					glutDisplayFunc(drawScene);
					glutTimerFunc(25, Update, 0); //call the update function every 25 ms; decrease the interval to rotate faster
					glutMainLoopEvent();	//runs a single iteration of the GLUT event loop and returns,
											//so control continues through the surrounding while loop
					if(tempry==1)
					{break;}
					tempry++;
				}
				
			}
			imshow("original", image);
			waitKey(30);
			imshow("mask",mask);
			waitKey(30);			//this is to give the processor some time to display the image
			//rendering happens here alongside the displays: loop through each voxel that is still a one to build
			//the vertex list (vertexpoints), then draw by looping through those points
			//after rendering, clear the list so it doesn't stack up
		}
		objtemp=0;
		img.clear();
		image_points.clear();
		object_points.clear();
		vertexpoints.clear();

	}
	webcam.release();
	destroyWindow("original");
	destroyWindow("mask");
	cout<<"The Final 3D Model is Being Rendered and will be displayed Shortly."<<endl;
	cout<<"\nThank you for using the 3D Image Reconstruction Software"<<endl;
	finalrender=true;
	//finalrender is for switching between different rendering 
	//check to see if any of the neighbour voxels are zero and then just push those vertices to the vertex and render triangle fan
	//we have the final 3d array
	
	glutReshapeFunc(handleResize);
	glutDisplayFunc(drawScene);
	glutTimerFunc(25, Update, 0); //call the update function every 25 ms; decrease the interval to rotate faster
	glutMainLoop();
	return 0;
}
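imagesegmentation() is not part of this listing. Given the mean colour and inverse covariance matrix it receives, it plausibly labels each pixel by Mahalanobis distance to a trained background-colour model; the function name, loop structure, and threshold direction below are assumptions, not the project's actual code:

// Hedged sketch: silhouette from a colour model (mean + inverse covariance).
// icovar must be 3x3 CV_64F, as produced by inverting covmat above.
#include <opencv2/opencv.hpp>

cv::Mat segmentByMahalanobis(const cv::Mat& bgr, const cv::Mat& icovar,
                             const cv::Scalar& mean, double maxDist)
{
    cv::Mat mask(bgr.size(), CV_8UC1);
    cv::Mat mu = (cv::Mat_<double>(1, 3) << mean[0], mean[1], mean[2]);
    for (int r = 0; r < bgr.rows; ++r)
        for (int c = 0; c < bgr.cols; ++c) {
            cv::Vec3b p = bgr.at<cv::Vec3b>(r, c);
            cv::Mat px = (cv::Mat_<double>(1, 3) << p[0], p[1], p[2]);
            double d = cv::Mahalanobis(px, mu, icovar);
            mask.at<uchar>(r, c) = (d > maxDist) ? 255 : 0; // far = foreground
        }
    return mask;
}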
コード例 #27
0
int main( int argc, char** argv )
{
	int cont = 0;
	namedWindow( "Head tracking", 1 );
	setMouseCallback( "Head tracking", onMouse, 0 );
	moveWindow("Head tracking",0,0);
	namedWindow( "Mascara", 1 );
	moveWindow("Mascara",600,0);
	namedWindow( "Imagen gris", 1 );
	moveWindow("Imagen gris",0,600);
		//namedWindow( "Matriz Back", 1 );
	char s[1024], *t, u[1024],*v;

	double _prvec[3] = { 0, 0, 0 };
	double _ptvec[3] = { 0, 0, 0 };
	CvPoint2D32f projectedPoints[N];
	CvPoint3D32f modelPoints[2*N];
	vector<Point2f> points[2];
	CvFont defFont;
	vector<cv::Rect> faces;
	VideoCapture cap;
	TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);
	Size winSize(10,10);
	cap.set(CV_CAP_PROP_FRAME_WIDTH, W);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, H);
	bool needToInit = false;
	bool nightMode = false;
	float xCM, yCM;
	CvPOSITObject *positObject;
	trainMachine();
	/*CvPoint3D32f modelPoints[N] = {
			{ 0.0f, 0.0f, 0.0f },
			{0.0f, 0.0f,0.0f},
			{0.0f, 0.0f, 0.0f},
			{ 0.0f, 0.0f, 0.0f}
	};*/

	if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
	        cap.open(argc == 2 ? argv[1][0] - '0' : 0);
	else if( argc == 2 )
		cap.open(argv[1]);
	if( !cap.isOpened() )
	{
		cout << "No se encontro la camara...\n";
	    return 0;
	}

	help();
	Mat gray, prevGray, image;

	for(;;)
	{
	    Mat frame;

	    cap >> frame;
	    if( frame.empty() )
	    {
	    	cout<<"No frame captured"<<endl;
	    	break;
	    }

	    frame.copyTo(image);
	    putText(image,"Sincronismo",cvPoint(500,22), 1, 0.8, cvScalar(25,25,25), 1, 0);
	    putText(image,"Alarma",cvPoint(500,62), 1, 0.8, cvScalar(25,25,25), 1, 0);
	    circle( image, Point(600,20), 11, Scalar(0,0,0), 1, 8);
	    circle( image, Point(600,60), 11, Scalar(0,0,0), 1, 8);

	    std::vector<cv::Mat> rgbChannels(3);
	    split(image, rgbChannels);
	    Mat gray = rgbChannels[2];
	    equalizeHist( gray, gray );
	    Mat maskNose (gray.size(), CV_8UC1);
	    Mat maskLeft (gray.size(), CV_8UC1);
	    //circle( image, Point(600,20), 11, Scalar(0,0,0), 1, 8);
	    for( unsigned i = 0; i < faces.size(); i++ )
	    {
	    	rectangle(frame, faces[i], 1234);
	    }
	    if( nightMode )
	    	image = Scalar::all(0);

	    if( needToInit )
	    {
	        // Automatic initialization
	        faces = detectFace(gray);



	        if(faces.size()!=0)
	        {
	        	//cvWaitKey(99);
	        	Scalar color(255,255,255);

	        	circle( image, Point(600,20), 10, Scalar(0,255,0), -1, 8);

	        	//-- Find and draw the eye region
	        	int eye_region_width = faces[0].width * (kEyePercentWidth/100.0);
	        	int eye_region_height = faces[0].width * (kEyePercentHeight/100.0);
	        	int eye_region_top = faces[0].height * (kEyePercentTop/100.0)+faces[0].y;
	        	cv::Rect leftEyeRegion(faces[0].width*(kEyePercentSide/100.0)+faces[0].x,eye_region_top,eye_region_width,eye_region_height);
	        	cv::Rect rightEyeRegion(faces[0].width - eye_region_width - faces[0].width*(kEyePercentSide/100.0), eye_region_top,eye_region_width,eye_region_height);
	        	maskLeft.setTo(Scalar::all(0));
	        	rectangle(maskLeft,leftEyeRegion,color,CV_FILLED);

	        	goodFeaturesToTrack(gray, points[0], MAX_COUNT, qualityLevel, 10, maskLeft, 3, useHarrisDetector, 0.04);
	        	cornerSubPix(gray, points[0], winSize, Size(-1,-1), termcrit);
/*	        	for(unsigned i=0;i<points[0].size();i++)
	        	{
	        		if (points[1][i].x<leftEyeRegion.x||points[1][i].x>leftEyeRegion.x+leftEyeRegion.width)

	        	}*/
	        	for(unsigned i=0;i<3&&i<points[0].size();i++)
	        	{
	        		modelPoints[i].x=points[0][i].x-320;
	        		modelPoints[i].y=240-points[0][i].y;
	        		modelPoints[i].z=0.0f;
	        		circle( image, points[0][i], markerThickness, Scalar(255,255,0), -1, 8);
	        	}
	        	cout<<sizeof modelPoints<<endl;
	        	//-- Find and draw the nose region
	        	int nose_region_width = faces[0].width * (kNosePercentWidth/100.0);
	        	int nose_region_height = faces[0].width * (kNosePercentHeight/100.0);
	        	int nose_region_top = faces[0].height * (kNosePercentTop/100.0)+faces[0].y;
	        	cv::Rect noseRegion(faces[0].width*(kNosePercentSide/100.0)+faces[0].x,nose_region_top,nose_region_width,nose_region_height);
	        	maskNose.setTo(Scalar::all(0));
	        	rectangle(maskNose,noseRegion,color,CV_FILLED);

	            goodFeaturesToTrack(gray, points[0], MAX_COUNT, qualityLevel, 10, maskNose, 3, useHarrisDetector, 0.04);
	            //goodFeaturesToTrack(gray, points[0], MAX_COUNT, qualityLevel, 10, Mat(), 3, useHarrisDetector, 0.04);
	            cornerSubPix(gray, points[0], winSize, Size(-1,-1), termcrit);

	            addRemovePt = false;

	            for(unsigned i=0;i<3&&i<points[0].size();i++)
	            {
	            	modelPoints[i].x=points[0][i].x-320;
	            	modelPoints[i].y=240-points[0][i].y;
	            	modelPoints[i].z=3.0f;
	            	circle( image, points[0][i], markerThickness, Scalar(0,0,255), -1, 8);
	            }
	            cout<<sizeof modelPoints<<endl;

	            positObject = cvCreatePOSITObject( modelPoints, 6 );
	            imshow("Imagen gris",gray(cvRect(faces[0].x,faces[0].y, faces[0].width, faces[0].height)));
	            cvInitFont(&defFont, CV_FONT_HERSHEY_COMPLEX_SMALL, 0.8f, 0.8f, 0, 1, 1);
	            IplImage *img = cvCreateImage(cvSize(W,H),8,3);
	            cvZero(img);
	        }
	        else //putText(image, "Face not detected", cvPoint(30,30), 1, 0.8, cvScalar(200,200,250), 1, 0);
	        circle( image, Point(600,20), 10, Scalar(0,0,255), -1, 8);
	    }
	    else if( !points[0].empty() )
	    {
	    	vector<uchar> status;
	        vector<float> err;
	        if(prevGray.empty())
	        	gray.copyTo(prevGray);
	        calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0);
	        cornerSubPix(gray, points[1], winSize, Size(-1,-1), termcrit);
	        circle( image, Point(600,20), 10, Scalar(0,255,0), -1, 8);

	        for(unsigned i=0;i<N&&i<points[1].size();i++)
	        {
	        	projectedPoints[i].x=points[1][i].x;
	        	projectedPoints[i].y=points[1][i].y;
	        }

	        posit(N, modelPoints, projectedPoints, _ptvec, _prvec, positObject);
	        for(int i = 0; i < 3; i++)
	        {
	        	_prvec[i] *= -1;
	        }
	        size_t i, k;

	        for( i = k = 0; i < points[1].size(); i++ )
	        {
	        	if( addRemovePt )
	            {
	        		if( norm(pt - points[1][i]) <= 5 )
	                {
	        			addRemovePt = false;
	                    continue;
	                }
	            }

	            if( !status[i] )
	                continue;

	            points[1][k++] = points[1][i];

	            circle( image, points[1][i], markerThickness, Scalar(0,255,0), -1, 8);
	            circle( maskNose, points[1][i], markerThickness, Scalar(128,128,128), -1, 8);
	        }
	        cont++;
//	        cout<<modelPoints[0].x<<"	"<<modelPoints[1].x<<"	"<<modelPoints[2].x<<"	"<<modelPoints[3].x<<endl;
//	        cout<<projectedPoints[0].x<<"	"<<projectedPoints[1].x<<"	"<<projectedPoints[2].x<<"	"<<projectedPoints[3].x<<endl;
//	       	cout <<_ptvec[0]<<"	"<<_ptvec[1]<<"	"<<_ptvec[2]<<endl;
	       	t = s;
	       	t += sprintf(s,"Traslacion  x=%.1f y=%.1f z=%.1f ",_ptvec[0],_ptvec[1],_ptvec[2]);
	       	t += sprintf(t,"Rotacion x=%.0f y=%.0f z=%.0f ",ANGLE(_prvec[0]),ANGLE(_prvec[1]),ANGLE(_prvec[2]));
	       	putText(image, s, cvPoint(10,20), 1, 0.8, cvScalar(25,25,25), 1, 0);
	       	putText(image, s, cvPoint(10,40), 1, 0.8, cvScalar(225,225,225), 1, 0);
	       	sprintf(u,"Cantidad de fotogramas: %d - Puntos activos: %d",cont,points[0].size());
	       	putText(image,u,cvPoint(10,60), 1, 0.8, cvScalar(25,25,25), 1, 0);
	       	putText(image,u,cvPoint(10,80), 1, 0.8, cvScalar(225,225,225), 1, 0);
	       	if(archivo)
	       	{
	       		ofstream myfile ("datos.txt",ios::app);
	       		myfile<<_ptvec[0]<<" "<<_ptvec[1]<<" "<<_ptvec[2]<<" "<<_prvec[0]<<" "<<_prvec[1]<<" "<<_prvec[2]<<" 1"<<endl;
	       		myfile.close();
	       	}
	        points[1].resize(k);
	    }

	    if( addRemovePt && points[1].size() < (size_t)MAX_COUNT )
	    {
	    	vector<Point2f> tmp;
	        tmp.push_back(pt);
	        cornerSubPix( gray, tmp, winSize, cvSize(-1,-1), termcrit);
	        points[1].push_back(tmp[0]);
	        addRemovePt = false;
	    }
	    if (faces.size()!=0 && points[0].size()>MAX_INIT)
	    	needToInit = false;
	    if(points[0].size()<2)
	    	needToInit = true;

	    if(!points[0].empty())
	    {
	    	Moments m = moments(points[0],false);
	    	xCM=m.m10/m.m00;
	    	yCM=m.m01/m.m00;
//	    	cout<<"CM x: "<<xCM<<"	CM y: "<<yCM<<endl;
//	    	circle( image, cvPoint(xCM,yCM), 5, Scalar(255,0,0), -1, 8);
//	    	circle(maskNose, cvPoint(xCM,yCM), 5, Scalar(255,0,0), -1, 8);
	    }
	    if(!frame.empty())
	   	{
	    	Mat salida;
	    	if(video) grabarVideo(image, cap);
	    	if(rotacion) rotateImage(image,image,90,120,90,0,0,360,360);
	   	    imshow("Head tracking", image);
	   	    imshow("Mascara",maskNose);

	   	}


	    char c = (char)waitKey(10);
	    if( c == 27 )
	    	break;
	    switch( c )
	    {
	    	case 'r':
	            needToInit = true;
	            break;
	        case 'c':
	            points[1].clear();
	            break;
	        case 'n':
	            nightMode = !nightMode;
	            break;
	        default:
	            ;
	        }


	    std::swap(points[1], points[0]);
	    swap(prevGray, gray);
	}

	return 0;
}
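posit() above is a project wrapper that is not shown; underneath it presumably drives OpenCV's legacy pose-from-orthography routine, consistent with the cvCreatePOSITObject setup earlier. A hedged sketch of such a cvPOSIT call (the focal length and termination criteria are assumptions, not values from this program):

// Hedged sketch of the legacy C-API POSIT call (OpenCV 2.x).
#include <opencv/cv.h>  // legacy header that declares cvPOSIT

void estimatePose(CvPOSITObject* positObject, CvPoint2D32f* imagePoints,
                  float rotation[9], float translation[3])
{
    const double FOCAL_LENGTH = 760.0; // assumed camera focal length in pixels
    CvTermCriteria crit = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER,
                                         100, 1.0e-4f);
    // estimates rotation and translation from >= 4 non-coplanar
    // model/image point correspondences
    cvPOSIT(positObject, imagePoints, FOCAL_LENGTH, crit,
            rotation, translation);
}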
コード例 #28
0
int main(int argc, char** argv){
  Mat image;
  int width, height;
  VideoCapture cap;
  vector<Mat> planes;
  Mat histR, histG, histB;
  int nbins = 64; // This is the L of our equation
  float range[] = {0, 256};
  const float *histrange = { range };
  bool uniform = true;
  bool acummulate = false;

  cap.open(0);

  if(!cap.isOpened()){
    cout << "cameras indisponiveis";
    return -1;
  }

  width  = cap.get(CV_CAP_PROP_FRAME_WIDTH);
  height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);

  cout << "largura = " << width << endl;
  cout << "altura  = " << height << endl;

  int histw = nbins, histh = nbins/2;
  Mat histImgR(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgG(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgB(histh, histw, CV_8UC3, Scalar(0,0,0));

  while(1){
    cap >> image;

    equalize_image(image, nbins);

    split (image, planes); // OpenCV stores BGR: planes[0]=B, planes[1]=G, planes[2]=R

    calcHist(&planes[2], 1, 0, Mat(), histR, 1,
             &nbins, &histrange,
             uniform, acummulate);
    calcHist(&planes[1], 1, 0, Mat(), histG, 1,
             &nbins, &histrange,
             uniform, acummulate);
    calcHist(&planes[0], 1, 0, Mat(), histB, 1,
             &nbins, &histrange,
             uniform, acummulate);

    normalize(histR, histR, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
    normalize(histG, histG, 0, histImgG.rows, NORM_MINMAX, -1, Mat());
    normalize(histB, histB, 0, histImgB.rows, NORM_MINMAX, -1, Mat());

    histImgR.setTo(Scalar(0));
    histImgG.setTo(Scalar(0));
    histImgB.setTo(Scalar(0));

    for(int i=0; i<nbins; i++){
      line(histImgR, Point(i, histh),
           Point(i, cvRound(histR.at<float>(i))),
           Scalar(0, 0, 255), 1, 8, 0);
      line(histImgG, Point(i, histh),
           Point(i, cvRound(histG.at<float>(i))),
           Scalar(0, 255, 0), 1, 8, 0);
      line(histImgB, Point(i, histh),
           Point(i, cvRound(histB.at<float>(i))),
           Scalar(255, 0, 0), 1, 8, 0);
    }
    histImgR.copyTo(image(Rect(0, 0       ,nbins, histh)));
    histImgG.copyTo(image(Rect(0, histh   ,nbins, histh)));
    histImgB.copyTo(image(Rect(0, 2*histh ,nbins, histh)));
    imshow("image", image);
    if(waitKey(30) >= 0) break;
  }
  return 0;
}
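equalize_image() is declared elsewhere in the project. A plausible implementation consistent with the per-channel histograms drawn above equalizes each BGR plane independently; the body below is an assumption, not the project's actual helper:

// Hedged sketch of the missing equalize_image() helper. The nbins argument
// matches the call site but is unused here, since cv::equalizeHist always
// works on the full 256-level range.
#include <opencv2/opencv.hpp>

void equalize_image(cv::Mat& image, int /*nbins*/)
{
    std::vector<cv::Mat> ch;
    cv::split(image, ch);                 // OpenCV channel order is B, G, R
    for (size_t i = 0; i < ch.size(); ++i)
        cv::equalizeHist(ch[i], ch[i]);
    cv::merge(ch, image);
}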
コード例 #29
0
ファイル: ff.cpp プロジェクト: BenoitSchillings/funwithfft
int main( int argc, char *argv[] ){
    int max_offset = 280;
    int cmp_width;
 
    if (argc < 2) {
        printf("usage: %s <video>\n", argv[0]);
        return -1;
    }
    char *name1 = argv[1];
 
    VideoCapture cap;
    VideoWriter out;
  
    cap.open(name1);

    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    out.open("out1.avi",
	     ex,
	     60,
	     Size(1280, 960),
	     false);
    char s[5];
    s[4] = 0;
    memcpy(s, &ex, 4);
    printf("%s\n", s); 
 
    if(!cap.isOpened())  // check if we succeeded
        return -1;
    Mat mat;
    
    int nframes = cap.get(CV_CAP_PROP_FRAME_COUNT);
   
    for (int i = 0; i < 5; i++) {
        mat = GetFrame(cap);
    }
    int sy, sx;
    
    sy = mat.size().height;
    sx = mat.size().width;
    cmp_width = sx/4; 
  
    Mat m0 = mat(Rect(300, 300, sx-600, sy-600)).clone();

    Mat sum = mat.clone();
    sum = Scalar(0);

    double t = clock();
    int cnt = 0;
 
    for (int i = 0; i < nframes - 5; i++) {

	mat = GetFrame(cap);

    	m0 = mat(Rect(300, 300, sx-600, sy-600)).clone();

	Point loc1 = match(m0, mat);
        
	//cout << loc1; 
	int dx = loc1.x;
	int dy = loc1.y;
 
	char skip = 0;
	
	if (abs(dx) > max_offset || abs(dy) > max_offset) {
		skip = 1;
	}

	if (dx <= -max_offset) dx = -max_offset;
	if (dy <= -max_offset) dy = -max_offset; 
	if (dx >= max_offset) dx = max_offset;
	if (dy >= max_offset) dy = max_offset;	
	
	copyMakeBorder(mat, mat,
                       abs(dy),
                       abs(dy),
                       abs(dx),
                       abs(dx),
                       BORDER_CONSTANT, Scalar::all(0.0));

	Mat mx =  mat(Rect(abs(dx) + dx,
			   abs(dy) + dy,
			   sx, sy)); 
	
	normalize(mx, mx, 0, 1, NORM_MINMAX, CV_32F);
	
      
	waitKey(1);
	
	if (skip == 0) {
		sum = sum + mx;
	}
        
	cnt++;
	printf("%d\n", i);	
	if (cnt == 25) {
		normalize(sum, sum, 0, 1, NORM_MINMAX, CV_32F);	
 		//m0 = sum(Rect(300, 300, sx-600, sy-600)).clone();

		imshow("m0", sum);
		cnt = 0;
		Mat tmp;

		normalize(sum, tmp, 0, 255, NORM_MINMAX, CV_32F);
		tmp.convertTo(tmp, CV_8U);
		//imshow("tmp",tmp);	
		out << tmp;
		sum = Scalar(0);
	}
    }
    return 0;
}
コード例 #30
0
ファイル: ufacedetect.cpp プロジェクト: 112000/opencv
int main( int argc, const char** argv )
{
    VideoCapture capture;
    UMat frame, image;
    Mat canvas;
    const string scaleOpt = "--scale=";
    size_t scaleOptLen = scaleOpt.length();
    const string cascadeOpt = "--cascade=";
    size_t cascadeOptLen = cascadeOpt.length();
    const string nestedCascadeOpt = "--nested-cascade";
    size_t nestedCascadeOptLen = nestedCascadeOpt.length();
    const string tryFlipOpt = "--try-flip";
    size_t tryFlipOptLen = tryFlipOpt.length();
    String inputName;
    bool tryflip = false;

    help();

    CascadeClassifier cascade, nestedCascade;
    double scale = 1;

    for( int i = 1; i < argc; i++ )
    {
        cout << "Processing " << i << " " <<  argv[i] << endl;
        if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
        {
            cascadeName.assign( argv[i] + cascadeOptLen );
            cout << "  from which we have cascadeName= " << cascadeName << endl;
        }
        else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
        {
            if( argv[i][nestedCascadeOpt.length()] == '=' )
                nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
            if( !nestedCascade.load( nestedCascadeName ) )
                cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
        }
        else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
        {
            if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale > 1 )
                scale = 1;
            cout << " from which we read scale = " << scale << endl;
        }
        else if( tryFlipOpt.compare( 0, tryFlipOptLen, argv[i], tryFlipOptLen ) == 0 )
        {
            tryflip = true;
            cout << " will try to flip image horizontally to detect assymetric objects\n";
        }
        else if( argv[i][0] == '-' )
        {
            cerr << "WARNING: Unknown option %s" << argv[i] << endl;
        }
        else
            inputName = argv[i];
    }

    if( !cascade.load( cascadeName ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        help();
        return -1;
    }

    cout << "old cascade: " << (cascade.isOldFormatCascade() ? "TRUE" : "FALSE") << endl;

    if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
    {
        int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0';
        if(!capture.open(c))
            cout << "Capture from camera #" <<  c << " didn't work" << endl;
    }
    else
    {
        if( inputName.empty() )
            inputName = "lena.jpg";
        image = imread( inputName, 1 ).getUMat(ACCESS_READ);
        if( image.empty() )
        {
            if(!capture.open( inputName ))
                cout << "Could not read " << inputName << endl;
        }
    }

    namedWindow( "result", 1 );

    if( capture.isOpened() )
    {
        cout << "Video capturing has been started ..." << endl;
        for(;;)
        {
            capture >> frame;
            if( frame.empty() )
                break;

            detectAndDraw( frame, canvas, cascade, nestedCascade, scale, tryflip );

            if( waitKey( 10 ) >= 0 )
                break;
        }
    }