Example #1
int trainData() {

    std::string videoName = "";

    int n_frames[1000];
    //create dictionary
    int dict_size=100;//***

    Mat features;
    for(int i=1; i<no_videos; i++) {


        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/trainvideos/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);
        if(!cap.isOpened())  // check if we succeeded
            return -1;

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        //create window to show image
        //namedWindow("Video",1);
        //cout<<count<<endl;
        int jump=count/N;
        int j=1;

        int u=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            ////////EXTRACT INTEREST POINTS USING SIFT////
            // vector of keypoints
            std::vector<cv::KeyPoint> keypoints;
            // Construct the SIFT feature detector object
            SiftFeatureDetector sif(0.03,10.); // threshold  //***
            //Detect interest points
            sif.detect(gray_image,keypoints);

            ////////IMSHOW THE FRAMES EXTRACTED///////////

            //copy video stream to image
            //cap>>image;
            //print image to screen
            //imshow("Video",image);


            ///////////Save the frames//////////////

            stringstream temp2;
            temp2<<j;
            std::string no2=temp2.str();
            std::string frame_name="frame"+no2+".jpg";
            imwrite(frame_name,image);


            //////////////Draw the keypoints////////////

            /*
            Mat featureImage;
            // Draw the keypoints with scale and orientation information
            drawKeypoints(image, // original image
            keypoints, // vector of keypoints
            featureImage, // the resulting image
            Scalar(255,0,255), // color of the points
            DrawMatchesFlags::DRAW_RICH_KEYPOINTS); //flag
            //std::string name="image"+i;
            imshow(frame_name, featureImage );
            */

            ////////////////////compute descriptors//////////////////

            SiftDescriptorExtractor siftExtractor;
            Mat siftDesc;
            siftExtractor.compute(gray_image,keypoints,siftDesc);
            features.push_back(siftDesc); // accumulate the descriptors from each frame into one matrix per video

            ////////////////
            //delay 33ms //***
            //waitKey(33);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }

        //store number of frames per video
        n_frames[i-1]=j-1;



    }

    TermCriteria term(CV_TERMCRIT_ITER,100,0.001);//***

    //retries number ***
    int retries=1;

    int flags=KMEANS_PP_CENTERS;
    BOWKMeansTrainer bowTrainer(dict_size,term,retries,flags);
    //cluster the feature vectors
    Mat dictionary=bowTrainer.cluster(features);

    //for further process
    full_dictionary.push_back(dictionary);
    ///////////////////////////////////////////////////
    FileStorage fs("full_dictionary.yml", FileStorage::WRITE);
    fs << "vocabulary" << full_dictionary;
    fs.release();
    //Created Vocabulary

    //Calculate histograms for the train videos
    //idf_vector(full_dictionary);

    return 0;
}
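
trainData() relies on a few globals that are not part of this listing (N, no_videos, full_dictionary, plus the usual "using namespace cv/std" and the OpenCV 2.x nonfree headers). A minimal sketch of plausible declarations; the names are taken from the code above, the values are placeholders only:

const int N = 10;        // frames sampled per video (the while loop reads 10)
int no_videos = 50;      // hypothetical training-set size; the loop runs i = 1..no_videos-1
Mat full_dictionary;     // vocabulary rows accumulated for the FileStorage dump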
void doDetect (void)
{
  Mat src, dst;
  VideoCapture cap;
  /// Load image
  //src = imread( "pic.jpg", 1 );

  //delay(WAITPHOTO);

  if(!cap.open(0)) {
    printf("cannot open camera\n");
    return;
  }

  cap >> src;

  if( !src.data )
    { return; }

  /// Split the image into its 3 planes (B, G and R)
  vector<Mat> bgr_planes;
  split( src, bgr_planes );

  /// Establish the number of bins
  int histSize = 256;

  /// Set the ranges (for B, G and R)
  float range[] = { 0, 256 } ;
  const float* histRange = { range };

  bool uniform = true; bool accumulate = false;

  Mat b_hist, g_hist, r_hist;

  /// Compute the histograms:
  calcHist( &bgr_planes[0], 1, 0, Mat(), b_hist, 1, &histSize, &histRange, uniform, accumulate );
  calcHist( &bgr_planes[1], 1, 0, Mat(), g_hist, 1, &histSize, &histRange, uniform, accumulate );
  calcHist( &bgr_planes[2], 1, 0, Mat(), r_hist, 1, &histSize, &histRange, uniform, accumulate );

  // Draw the histograms for B, G and R
  int hist_w = 512; int hist_h = 400;
  int bin_w = cvRound( (double) hist_w/histSize );

  Mat histImageR( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
  Mat histImageB( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
  Mat histImageG( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );


  /// Normalize the result to [ 0, histImage.rows ]
  normalize(b_hist, b_hist, 0, histImageB.rows, NORM_MINMAX, -1, Mat() );
  normalize(g_hist, g_hist, 0, histImageG.rows, NORM_MINMAX, -1, Mat() );
  normalize(r_hist, r_hist, 0, histImageR.rows, NORM_MINMAX, -1, Mat() );

  /// Draw for each channel
  for( int i = 1; i < histSize; i++ )
  {
      line( histImageB, Point( bin_w*(i-1), hist_h - cvRound(b_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(b_hist.at<float>(i)) ),
                       Scalar( 255, 0, 0), 2, 8, 0  );
      line( histImageG, Point( bin_w*(i-1), hist_h - cvRound(g_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(g_hist.at<float>(i)) ),
                       Scalar( 0, 255, 0), 2, 8, 0  );
      line( histImageR, Point( bin_w*(i-1), hist_h - cvRound(r_hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(r_hist.at<float>(i)) ),
                       Scalar( 0, 0, 255), 2, 8, 0  );
  }

  imwrite("orginal.jpg",src);
  imwrite( "PicR.jpg", histImageR );
  imwrite("PicG.jpg", histImageG);
  imwrite("PicB.jpg", histImageB);

  float peakRed=0.0, peakGreen=0.0, peakBlue=0.0;
  int positionRed=0, positionGreen=0, positionBlue=0;
  bool blueXred = false;
  bool fallBlue = false;

  for (int j=0;j<256;j++) { // scan all 256 bins
    if(r_hist.at<float>(j) > peakRed) {
       positionRed=j;
       peakRed=r_hist.at<float>(j);
    }
    if(g_hist.at<float>(j) > peakGreen) {
      positionGreen=j;
      peakGreen=g_hist.at<float>(j);
    }
    if(b_hist.at<float>(j) > peakBlue) {
      positionBlue=j;
      peakBlue=b_hist.at<float>(j);
    }
    if(b_hist.at<float>(j)+5 < peakBlue) {
      fallBlue = true;
    }
    if(b_hist.at<float>(j)-5 > peakBlue) {
      fallBlue = false;
    }

    if(b_hist.at<float>(j) - r_hist.at<float>(j) <5 && b_hist.at<float>(j) - r_hist.at<float>(j) >-5) {
      if (fallBlue) {
        blueXred = true;
      }
    }
    //printf("%f",r_hist<float>(j));
  }

  printf("Peak Red: %f $$ Position: %d ????",peakRed,positionRed);
  printf("Peak Blue: %f $$ Position: %d ????",peakBlue,positionBlue);
  printf("Peak Green: %f $$ Position: %d ????",peakGreen,positionGreen);


  if (positionRed < 208 && positionRed >138 && positionBlue < 216 && positionBlue > 156 && positionGreen <212 && positionGreen > 152) {
    printf("Grey");
    digitalWrite (COLOR1_PIN, HIGH);
    digitalWrite (COLOR2_PIN, HIGH);
  } else if (positionRed < 266 && positionRed >196 && positionBlue < 210 && positionBlue > 140 && positionGreen <159 && positionGreen > 89) {
    printf("Red");
    digitalWrite (COLOR1_PIN, LOW);
    digitalWrite (COLOR2_PIN, LOW);
    delay(200);
  } else if (positionRed < 100 && positionRed > 50 && positionBlue < 180 && positionBlue > 130 && positionGreen < 170 && positionGreen >120) {
    printf("Blue");
    digitalWrite (COLOR1_PIN, HIGH);
    digitalWrite (COLOR2_PIN, LOW);

  } else if (positionRed < 188 && positionRed > 148 && positionBlue < 192 && positionBlue > 132 && positionGreen < 226 && positionGreen >166) {
    printf("Green");
    digitalWrite (COLOR1_PIN, LOW);
    digitalWrite (COLOR2_PIN, HIGH);
  } else if (positionBlue < 255/4 && positionGreen < positionRed+20) {
    printf("Red");
    digitalWrite (COLOR1_PIN, LOW);
    digitalWrite (COLOR2_PIN, LOW);
    delay(200);
  }

  digitalWrite (READY_PIN, HIGH);
  digitalWrite (SIMULATION_PIN, HIGH);
}
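
The per-channel peak search in doDetect() walks the histogram bins by hand; the same result can come from cv::minMaxLoc on the 256x1 histogram Mat. A minimal sketch, with a helper name of our choosing:

// Hypothetical helper: peak height and bin index of a 1-D CV_32F histogram.
static void histPeak(const Mat& hist, int& position, float& peak)
{
    double maxVal;
    Point maxLoc;
    minMaxLoc(hist, 0, &maxVal, 0, &maxLoc);
    peak = (float)maxVal;
    position = maxLoc.y;  // histograms from calcHist are histSize x 1, so the row is the bin
}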
int main(int argc, char** argv){
  VideoCapture video;
  float media[] = {1,1,1,
				   1,1,1,
				   1,1,1};
  float gauss[] = {1,2,1,
				   2,4,2,
				   1,2,1};
  float horizontal[]={-1,0,1,
					  -2,0,2,
					  -1,0,1};
  float vertical[]={-1,-2,-1,
					0,0,0,
					1,2,1};
  float laplacian[]={0,-1,0,
					 -1,4,-1,
					 0,-1,0};

  Mat cap, frame, frame32f, frameFiltered;
  Mat mask(3,3,CV_32F), mask1;
  Mat result, result1;
  double width, height, min, max;
  int absolut;
  char key;

  video.open(0);
  if(!video.isOpened())
    return -1;
  width=video.get(CV_CAP_PROP_FRAME_WIDTH);
  height=video.get(CV_CAP_PROP_FRAME_HEIGHT);
  std::cout << "largura=" << width << "\n";;
  std::cout << "altura =" << height<< "\n";;

  namedWindow("filtroespacial",1);

  mask = Mat(3, 3, CV_32F, media);
  scaleAdd(mask, 1/9.0, Mat::zeros(3,3,CV_32F), mask1);
  swap(mask, mask1);
  absolut=1; // take the absolute value of the filtered image

  menu();
  for(;;){
    video >> cap;
    cvtColor(cap, frame, CV_BGR2GRAY);
    flip(frame, frame, 1);
    imshow("original", frame);
    frame.convertTo(frame32f, CV_32F);
    filter2D(frame32f, frameFiltered, frame32f.depth(), mask, Point(1,1), 0);
    if(absolut){
      frameFiltered=abs(frameFiltered);
    }
    frameFiltered.convertTo(result, CV_8U);
    imshow("filtroespacial", result);
    key = (char) waitKey(10);
    if( key == 27 ) break; // esc pressed!
    switch(key){
    case 'a':
	  menu();
      absolut=!absolut;
      break;
    case 'm':
	  menu();
      mask = Mat(3, 3, CV_32F, media);
      scaleAdd(mask, 1/9.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      printmask(mask);
      break;
    case 'g':
	  menu();
      mask = Mat(3, 3, CV_32F, gauss);
      scaleAdd(mask, 1/16.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      printmask(mask);
      break;
    case 'h':
	  menu();
      mask = Mat(3, 3, CV_32F, horizontal);
      printmask(mask);
      break;
    case 'v':
	  menu();
      mask = Mat(3, 3, CV_32F, vertical);
      printmask(mask);
      break;
    case 'l':
	  menu();
      mask = Mat(3, 3, CV_32F, laplacian);
      printmask(mask);
      break;
    case 'd':
      menu();
      // note: the Gaussian mask built here is immediately replaced by the
      // Laplacian one, so only the Laplacian is actually applied to the frame
      mask = Mat(3, 3, CV_32F, gauss);
      scaleAdd(mask, 1/16.0, Mat::zeros(3,3,CV_32F), mask1);
      mask = mask1;
      mask = Mat(3, 3, CV_32F, laplacian);
      printmask(mask);
      break;
    default:
      break;
    }
  }
  return 0;
}
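
This example calls menu() and printmask(), which are defined elsewhere in the program. Plausible minimal versions, assuming menu() only lists the key bindings and printmask() dumps the active 3x3 kernel:

void menu(void) {
    std::cout << "\npress a key:\n"
              << "a - toggle abs()\n"
              << "m - mean filter\ng - gaussian filter\n"
              << "h - horizontal sobel\nv - vertical sobel\n"
              << "l - laplacian\nd - gaussian then laplacian\n"
              << "esc - exit\n";
}

void printmask(Mat& m) {
    for (int i = 0; i < m.rows; i++) {
        for (int j = 0; j < m.cols; j++)
            std::cout << m.at<float>(i, j) << ", ";
        std::cout << "\n";
    }
}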
int main(int argc, char *argv[]) {
	if (argc != 5) {
		cout << "Expected 4 arguments: camera index and three comparison images" << endl;
		return -1;
	}

	stringstream conv;


	VideoCapture capture;
	capture.open(atoi(argv[1]));

	const string compareImage1 = argv[2];
	const string compareImage2 = argv[3];
	const string compareImage3 = argv[4];

	Mat image1 = imread(compareImage1, -1);
	Mat image2 = imread(compareImage2, -1);
	Mat image3 = imread(compareImage3, -1);

	downsample(&image1);
	downsample(&image2);
	downsample(&image3);

	displayImage("Image1", image1, 0);
	displayImage("Image2", image2, 1);
	displayImage("Image3", image3, 2);


	//cv::cvtColor(image1, image1, CV_BGR2GRAY);
	//   cv::threshold(image1, image1, 128, 255, CV_THRESH_BINARY);
	//vector<std::vector<cv::Point> > storage;
	//Mat contoursImg1 = image1.clone();
	//findContours(contoursImg1, storage, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

	Mat frame;
	Mat grayFrame;
	capture >> frame;
	int frameCounter = 0;

	//KalmanFilter kalman = KalmanFilter(2, 2, 0);

	
    ///kalman.transitionMatrix 
	//	=(Mat_<int>(2,2) << 1, 0, 1, 0);

    //setIdentity(kalman.measurementMatrix);
    //setIdentity(kalman.measurementNoiseCov, Scalar::all(1e-5));
	//setIdentity(kalman.errorCovPost, Scalar::all(1));


	KalmanFilter KF(4, 2, 0);
	KF.transitionMatrix = *(Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);

	// init...
	setIdentity(KF.measurementMatrix);
	setIdentity(KF.processNoiseCov, Scalar::all(1e-4));
	setIdentity(KF.measurementNoiseCov, Scalar::all(1e-1));
	setIdentity(KF.errorCovPost, Scalar::all(.1));

	while (!frame.empty()) {

		Mat prediction = KF.predict();
		//prediction.at<int>(0,0);
		cout << "Prediction: " << prediction << "   ";

		//process only grey frames:
		cvtColor(frame, grayFrame, CV_BGR2GRAY); // capture frames are BGR in OpenCV

		//downsample(&grayFrame);
		//nearest(image1, grayFrame);

		vector<Point2f> image1Corners = nearest(image1, frame.clone());
		vector<Point2f> image2Corners = nearest(image2, frame.clone());
		vector<Point2f> image3Corners = nearest(image3, frame.clone());


		Mat measurement = 
			(Mat_<float>(2,1) << (image1Corners[0].x + image1Corners[2].x)/2, (image1Corners[0].y + image1Corners[2].y)/2); 
		

		cout << measurement << endl;

		if(measurement.at<float>(0,0) != 0){
			KF.correct(measurement);
			Point predictCentre(prediction.at<float>(0,0), prediction.at<float>(1,0));
			cout << predictCentre;
			cv::circle(frame,predictCentre,5,Scalar(255,255,255, 0),3,8,0);
		}


		if(shouldDraw((image1Corners[0].x + image1Corners[2].x)/2, (image1Corners[0].y + image1Corners[2].y)/2, prediction.at<float>(0,0),prediction.at<float>(1,0))){
			drawCorners(&frame, image1Corners, 0);
		}
		drawCorners(&frame, image2Corners, 1);
		drawCorners(&frame, image3Corners, 2);


		cv::circle(frame,cvPoint( (image1Corners[0].x + image1Corners[2].x)/2, (image1Corners[0].y + image1Corners[2].y)/2),5,Scalar(255,255,0, 0),3,8,0);
		


		//cv::circle(frame,cvPoint( getAverage(image1Corners, 0), getAverage(image1Corners,1)),5,Scalar(255,0,255, 0),3,8,0);
		imshow( "Good Matches", frame );

		//post process!
		char key = (char)waitKey(2);
		switch (key) {
		case 27: // ESC: fall through and reset the counter
		case 's':
			frameCounter = 0;
			break;
		case 'q':
			return 0;
			break;
		}
		frameCounter++;

		if (frameCounter < 5000) {
			capture >> frame;
		}
	}
	return 0;
}
Example #5
int main(int argc,char **argv)
{
    try
    {
        if (readArguments (argc,argv)==false) {
            return 0;
        }
        //parse arguments
        //read from camera or from  file
        if (TheInputVideo=="live") {
            TheVideoCapturer.open(0);
            waitTime=10;
        }
        else  TheVideoCapturer.open(TheInputVideo);
        //check video is open
        if (!TheVideoCapturer.isOpened()) {
            cerr<<"Could not open video"<<endl;
            return -1;

        }

        //read first image to get the dimensions
        TheVideoCapturer>>TheInputImage;

        //read camera parameters if passed
        if (TheIntrinsicFile!="") {
            TheCameraParameters.readFromXMLFile(TheIntrinsicFile);
            TheCameraParameters.resize(TheInputImage.size());
        }
        //Configure other parameters
        if (ThePyrDownLevel>0)
            MDetector.pyrDown(ThePyrDownLevel);


        //Create gui

	MDetector.getThresholdParams( ThresParam1,ThresParam2);
        MDetector.setCornerRefinementMethod(MarkerDetector::LINES);

	/*
        cv::namedWindow("thres",1);
        cv::namedWindow("in",1);
        iThresParam1=ThresParam1;
        iThresParam2=ThresParam2;
        cv::createTrackbar("ThresParam1", "in",&iThresParam1, 13, cvTackBarEvents);
        cv::createTrackbar("ThresParam2", "in",&iThresParam2, 13, cvTackBarEvents);
	*/
	
        char key=0;
        int index=0;
        //capture until press ESC or until the end of the video
        while ( key!=27 && TheVideoCapturer.grab() ) // && index <= 50)
        {
            TheVideoCapturer.retrieve( TheInputImage);
            //copy image

            index++; //number of images captured

            double tick = (double)getTickCount();//for checking the speed
            //Detection of markers in the image passed
            MDetector.detect(TheInputImage,TheMarkers,TheCameraParameters,TheMarkerSize);
            //check the speed by calculating the mean speed of all iterations
            AvrgTime.first+=((double)getTickCount()-tick)/getTickFrequency();
            AvrgTime.second++;
            //cout<<"Time detection="<<1000*AvrgTime.first/AvrgTime.second<<" milliseconds"<<endl;
	    
            //print marker info and draw the markers in image
            TheInputImage.copyTo(TheInputImageCopy);
            for (unsigned int i=0;i<TheMarkers.size();i++) {
                if (AllMarkers.count( TheMarkers[i].id ) == 0)
                    AllMarkers[TheMarkers[i].id] = map<int,Marker>();
                AllMarkers[TheMarkers[i].id][index] = TheMarkers[i];

                cout<<index<<endl;
                cout<<TheMarkers[i]<<endl;
                TheMarkers[i].draw(TheInputImageCopy,Scalar(0,0,255),1);
            }
            //print other rectangles that contains no valid markers
       /**     for (unsigned int i=0;i<MDetector.getCandidates().size();i++) {
                aruco::Marker m( MDetector.getCandidates()[i],999);
                m.draw(TheInputImageCopy,cv::Scalar(255,0,0));
            }*/



            //draw a 3d cube in each marker if there is 3d info
            if (  TheCameraParameters.isValid())
                for (unsigned int i=0;i<TheMarkers.size();i++) {
                    CvDrawingUtils::draw3dCube(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                    CvDrawingUtils::draw3dAxis(TheInputImageCopy,TheMarkers[i],TheCameraParameters);
                }
            //DONE! Easy, right?
            cout<<endl<<endl<<endl;
            //show input with augmented information and  the thresholded image
            //cv::imshow("in",TheInputImageCopy);
            //cv::imshow("thres",MDetector.getThresholdedImage());

            //key=cv::waitKey(waitTime);//wait for key to be pressed
        }

	lastFrame = index;

    } catch (std::exception &ex) {
        cout<<"Exception :"<<ex.what()<<endl;
    }

    cout << "All done."<< endl;

    map<int, map<int,Marker> >::const_iterator i;
    for( i = AllMarkers.begin(); i != AllMarkers.end(); ++i ) {
      int markerId = (*i).first;
      map<int, Marker> markers = (*i).second;

      int frameCount = markers.size();

      cout << "frameCount = " << frameCount << endl;

      std::vector<double> x(frameCount);
      std::vector<double> m0x(frameCount);
      std::vector<double> m0y(frameCount);
      std::vector<double> m1x(frameCount);
      std::vector<double> m1y(frameCount);
      std::vector<double> m2x(frameCount);
      std::vector<double> m2y(frameCount);
      std::vector<double> m3x(frameCount);
      std::vector<double> m3y(frameCount);
      std::vector<double> tx(frameCount);
      std::vector<double> ty(frameCount);
      std::vector<double> tz(frameCount);
      std::vector<double> rx(frameCount);
      std::vector<double> ry(frameCount);
      std::vector<double> rz(frameCount);

      map<int, Marker>::const_iterator j;
      int index = 0;
      for( j = markers.begin(); j != markers.end(); ++j, index++ ) {
	int frameIndex = (*j).first;
	Marker marker = (*j).second;

	x[index] = frameIndex;
	m0x[index] = marker[0].x;
	m0y[index] = marker[0].y;
	m1x[index] = marker[1].x;
	m1y[index] = marker[1].y;
	m2x[index] = marker[2].x;
	m2y[index] = marker[2].y;
	m3x[index] = marker[3].x;
	m3y[index] = marker[3].y;
	tx[index] = marker.Tvec.ptr<float>(0)[0];
	ty[index] = marker.Tvec.ptr<float>(0)[1];
	tz[index] = marker.Tvec.ptr<float>(0)[2];
	rx[index] = marker.Rvec.ptr<float>(0)[0];
	ry[index] = marker.Rvec.ptr<float>(0)[1];
	rz[index] = marker.Rvec.ptr<float>(0)[2];
	
	cout << frameIndex << endl;
      }

#define SPLINE(VAR) gsl_spline *spline_ ## VAR = gsl_spline_alloc (gsl_interp_cspline, frameCount); gsl_spline_init (spline_ ## VAR, &x[0], &VAR[0], frameCount)
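
      // For reference, SPLINE(m0x) expands to the two statements:
      //   gsl_spline *spline_m0x = gsl_spline_alloc (gsl_interp_cspline, frameCount);
      //   gsl_spline_init (spline_m0x, &x[0], &m0x[0], frameCount);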

      SPLINE(m0x);
      SPLINE(m0y);
      SPLINE(m1x);
      SPLINE(m1y);
      SPLINE(m2x);
      SPLINE(m2y);
      SPLINE(m3x);
      SPLINE(m3y);
      SPLINE(tx);
      SPLINE(ty);
      SPLINE(tz);
      SPLINE(rx);
      SPLINE(ry);
      SPLINE(rz);

      for( index = 0; index < lastFrame; index++ ) {

	double m0x = gsl_spline_eval (spline_m0x, index, NULL);
	double m0y = gsl_spline_eval (spline_m0y, index, NULL);
	double m1x = gsl_spline_eval (spline_m1x, index, NULL);
	double m1y = gsl_spline_eval (spline_m1y, index, NULL);
	double m2x = gsl_spline_eval (spline_m2x, index, NULL);
	double m2y = gsl_spline_eval (spline_m2y, index, NULL);
	double m3x = gsl_spline_eval (spline_m3x, index, NULL);
	double m3y = gsl_spline_eval (spline_m3y, index, NULL);
	double tx = gsl_spline_eval (spline_tx, index, NULL);
	double ty = gsl_spline_eval (spline_ty, index, NULL);
	double tz = gsl_spline_eval (spline_tz, index, NULL);
	double rx = gsl_spline_eval (spline_rx, index, NULL);
	double ry = gsl_spline_eval (spline_ry, index, NULL);
	double rz = gsl_spline_eval (spline_rz, index, NULL);

	cv::Point2f m0 = cv::Point2f(m0x,m0y);
	cv::Point2f m1 = cv::Point2f(m1x,m1y);
	cv::Point2f m2 = cv::Point2f(m2x,m2y);
	cv::Point2f m3 = cv::Point2f(m3x,m3y);

	std::vector<cv::Point2f> corners(4);
	corners[0] = m0;
	corners[1] = m1;
	corners[2] = m2;
	corners[3] = m3;
	
	Marker interpolated = Marker(corners, markerId);

	interpolated.Rvec.create(3,1,CV_32FC1);
        interpolated.Tvec.create(3,1,CV_32FC1);
	interpolated.Tvec.at<float>(0,0) = tx;
	interpolated.Tvec.at<float>(1,0) = ty;
	interpolated.Tvec.at<float>(2,0) = tz;
	interpolated.Rvec.at<float>(0,0) = rx;
	interpolated.Rvec.at<float>(1,0) = ry;
	interpolated.Rvec.at<float>(2,0) = rz;

	cout << index << endl;
	cout << interpolated << endl;
      }
      

      
      gsl_spline_free (spline_m0x);
      gsl_spline_free (spline_m0y);
      gsl_spline_free (spline_m1x);
      gsl_spline_free (spline_m1y);
      gsl_spline_free (spline_m2x);
      gsl_spline_free (spline_m2y);
      gsl_spline_free (spline_m3x);
      gsl_spline_free (spline_m3y);
      gsl_spline_free (spline_tx);
      gsl_spline_free (spline_ty);
      gsl_spline_free (spline_tz);
      gsl_spline_free (spline_rx);
      gsl_spline_free (spline_ry);
      gsl_spline_free (spline_rz);

      
      //map<int, Marker>::const_iterator j;
      //cout << "id = " << markerId << endl;
    }
    
    //cout << TheFrames << endl;
}
int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);

    if(argc < 2) {
        parser.printMessage();
        return 0;
    }

    int dictionaryId = parser.get<int>("d");
    bool showRejected = parser.has("r");
    bool estimatePose = parser.has("c");
    float markerLength = parser.get<float>("l");

    Ptr<aruco::DetectorParameters> detectorParams = aruco::DetectorParameters::create();
    if(parser.has("dp")) {
        bool readOk = readDetectorParameters(parser.get<string>("dp"), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }
    detectorParams->doCornerRefinement = true; // do corner refinement in markers

    int camId = parser.get<int>("ci");

    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }

    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }

    Ptr<aruco::Dictionary> dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

    Mat camMatrix, distCoeffs;
    if(estimatePose) {
        bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
    }

    VideoCapture inputVideo;
    int waitTime;
    if(!video.empty()) {
        inputVideo.open(video);
        waitTime = 0;
    } else {
        inputVideo.open(camId);
        waitTime = 10;
    }

    double totalTime = 0;
    int totalIterations = 0;

    while(inputVideo.grab()) {
        Mat image, imageCopy;
        inputVideo.retrieve(image);

        double tick = (double)getTickCount();

        vector< int > ids;
        vector< vector< Point2f > > corners, rejected;
        vector< Vec3d > rvecs, tvecs;

        // detect markers and estimate pose
        aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);
        if(estimatePose && ids.size() > 0)
            aruco::estimatePoseSingleMarkers(corners, markerLength, camMatrix, distCoeffs, rvecs,
                                             tvecs);

        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        totalIterations++;
        if(totalIterations % 30 == 0) {
            cout << "Detection Time = " << currentTime * 1000 << " ms "
                 << "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
        }

        // draw results
        image.copyTo(imageCopy);
        if(ids.size() > 0) {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);

            if(estimatePose) {
                for(unsigned int i = 0; i < ids.size(); i++)
                {
                    aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvecs[i], tvecs[i],
                                    markerLength * 0.5f);
                    cout << tvecs[i] << endl;
                }
            }
        }

        if(showRejected && rejected.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));

        imshow("out", imageCopy);
        char key = (char)waitKey(1);
        if(key == 27) break;
    }

    return 0;
}
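
readDetectorParameters() and readCameraParameters() are helpers from the surrounding sample that are not reproduced here. A sketch of the camera one, assuming the usual OpenCV calibration YAML keys (camera_matrix, distortion_coefficients):

// Sketch only: assumes the standard calibration-file layout.
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
    FileStorage fs(filename, FileStorage::READ);
    if(!fs.isOpened())
        return false;
    fs["camera_matrix"] >> camMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    return true;
}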
Example #7
int main(int argc, char** argv)
{
    int flag_use_image = 0;
    if( argc != 2 )
      {
        std::cout<< "Usage: ./init num" << std::endl;
        std::cout<< "num: 0 - image" << std::endl
                 << "     1 - video" << std::endl
                 << "     2 - dataset" << std::endl;
        return -1;
    }
    else
    {
        std::string val = argv[1];
        if(val == "0")
        {

        }
        else if(val == "1")
        {
            flag_use_image = 1;
        }
        else if(val == "2")
        {
            flag_use_image = 2;
        }
        else
        {
            std::cout<< "num error: expected 0, 1 or 2" << std::endl;
            return -1;
        }

    std::string winName = "Image";
    namedWindow(winName, WINDOW_NORMAL);
    mat_canvas = imread( "data/book.jpg");

    if(mat_canvas.data == NULL)
    {
        std::cout<< "Image is not opened." << std::endl;
        return -1;
    }


    if(flag_use_image == 0)
    {
        setMouseCallback(winName, mouseEvent);

//        // write mat to file
//        std::string fileName = "mat_descriptors.yml";
//        FileStorage fs(fileName, FileStorage::WRITE);
//        fs << "descriptors" << mat_descriptors;
//        fs.release();
//        std::cout<< fileName << " is generated." << std::endl;

//        Mat copy;
//        FileStorage fs2("mat_descriptors.yml", FileStorage::READ);
//        fs2["descriptors"] >> copy;
//        fs2.release();

//        FileStorage fs3("test.yml", FileStorage::WRITE);
//        fs3 << "descriptors" << copy;
//        fs3.release();


        //////////////////////////////////////////////////////////
//        std::vector<cv::Point3f> vec_pois;
//        vec_pois.push_back(Point3f(0, 0, 0));
//        vec_pois.push_back(Point3f(1.1, 0.1, 0));
//        vec_pois.push_back(Point3f(0.3, 2.1, 0));
//        vec_pois.push_back(Point3f(7.3, 2, 0));
//        vec_pois.push_back(Point3f(1.3, 4.1, 0));

//        FileStorage fs3("POIs.yml", FileStorage::WRITE);
//        fs3 << "POIs" << vec_pois;
//        fs3.release();

        //////////////////////////////////////////////////////////

        while(1)
        {
            imshow(winName, mat_canvas );

            waitKey(30);
        }

    }
    //-- use dataset
    else if(flag_use_image == 2)
    {
        useDataset();

        while(1)
        {

            imshow(winName, mat_canvas );

            waitKey(30);
        }

    }
    else // video input: tracking features
    {
        VideoCapture cap;

        cap.open(1);
        if(!cap.isOpened())  // check if we succeeded
            return -1;
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 800);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 600);


        namedWindow("Keypoints", WINDOW_NORMAL);
        Mat mat_image;
        int num_vecKeypoints;
        int num_trackingPoints = 50;
        Mat mat_descriptors;

        char keyInput;

        //-- Step 1: Detect the keypoints using Detector
        // int minHessian = 400;

        OrbFeatureDetector detector;
        FREAK extractor;

        while(1)
        {
            cap >> mat_image;

            std::vector<KeyPoint> vec_keypoints, vec_goodKeypoints;

            detector.detect( mat_image, vec_keypoints );
            num_vecKeypoints = vec_keypoints.size();

            std::sort(vec_keypoints.begin(), vec_keypoints.end(),
                      jlUtilities::sort_feature_response);

            if(num_vecKeypoints > num_trackingPoints)
            {
                num_vecKeypoints = num_trackingPoints;
                vec_keypoints.erase(vec_keypoints.begin() + num_vecKeypoints,
                                   vec_keypoints.end());
            }


            extractor.compute( mat_image, vec_keypoints, mat_descriptors );


            // write mat to file
            std::string fileName = "mat_descriptors.yml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "descriptors" << mat_descriptors;
            fs.release();
            std::cout<< fileName << " is generated." << std::endl;

    //        Mat copy;
    //        FileStorage fs2("mat_descriptors.yml", FileStorage::READ);
    //        fs2["descriptors"] >> copy;
    //        fs2.release();

    //        FileStorage fs3("test.yml", FileStorage::WRITE);
    //        fs3 << "descriptors" << copy;
    //        fs3.release();


            //////////////////////////////////////////////////////////
    //        std::vector<cv::Point3f> vec_pois;
    //        vec_pois.push_back(Point3f(0, 0, 0));
    //        vec_pois.push_back(Point3f(1.1, 0.1, 0));
    //        vec_pois.push_back(Point3f(0.3, 2.1, 0));
    //        vec_pois.push_back(Point3f(7.3, 2, 0));
    //        vec_pois.push_back(Point3f(1.3, 4.1, 0));

    //        FileStorage fs3("POIs.yml", FileStorage::WRITE);
    //        fs3 << "POIs" << vec_pois;
    //        fs3.release();

            //////////////////////////////////////////////////////////

            //-- Draw keypoints
            Mat mat_kpImage;

            drawKeypoints( mat_image, vec_keypoints, mat_kpImage,
                           Scalar::all(-1), DrawMatchesFlags::DEFAULT );

            for (int i=0; i<num_trackingPoints; i++)	{
                cv::circle(mat_kpImage,
                    vec_keypoints[i].pt,	// center
                    3,							// radius
                    cv::Scalar(0,0,255),		// color
                    -1);						// negative thickness=filled

                char szLabel[50];
                sprintf(szLabel, "%d", i);
                putText (mat_kpImage, szLabel, vec_keypoints[i].pt,
                    cv::FONT_HERSHEY_PLAIN, // font face
                    1.0,					// font scale
                    cv::Scalar(255,0,0),	// font color
                    1);						// thickness
            }


            //-- Show detected (drawn) keypoints
            imshow("Keypoints", mat_kpImage );

            waitKey(30);
        }
    }

    return 0;
}
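
jlUtilities::sort_feature_response is an external comparator; since it is passed to std::sort and the strongest keypoints are then kept from the front of the vector, it presumably orders by descending response. A sketch of that assumption:

// Assumed implementation; the real jlUtilities version is not shown.
namespace jlUtilities {
    inline bool sort_feature_response(const KeyPoint& a, const KeyPoint& b) {
        return a.response > b.response;   // strongest features first
    }
}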
Example #8
int main(int argc, const char* argv[])
{

	Mat frame, uiframe, grayFrame, result, skin, frame1, frame2, frame3,frame4, frame5;
	VideoCapture  capture;
	int lowerBound = 200;
	int upperBound = 255;
	
	//Start the camera
	
	capture.open(0);
	if( !capture.isOpened() )
	{
		std::cout << "no se encontro la camara" << std::endl;
		return -1;
	}

	//Create the window
	cvNamedWindow("Result", CV_WINDOW_AUTOSIZE);
	cvCreateTrackbar("Lower bound", "Result", &lowerBound, 255 );
	cvCreateTrackbar("Upper bound", "Result", &upperBound, 255 );

	//Capture from the camera
	while(1)
	{
		for(int i = 0; i < 5; i++ ) capture >> frame; // skip a few buffered frames
		capture >> frame;
		uiframe = frame.clone();

		// Draw the lower and upper bars
		dibujarInterface(uiframe);
		imshow("Result", uiframe );
		cvWaitKey();

		//Get the skin mask
		skin = getSkin(frame);

		//Convert the frame to grayscale
		cvtColor(frame, grayFrame, CV_BGR2GRAY);
		//Histogram Equalization
		equalizeHist(grayFrame, frame1);

		blur(frame1,frame2, Size(5,5));

		//erosion and dilation
		int size = 6;
		Mat element = getStructuringElement(MORPH_CROSS, Size(2*size+1,2*size+1), Point(size,size) );
		erode(frame2, frame3, element );
		dilate(frame3, frame4, element );

		inRange(frame4, Scalar(lowerBound), Scalar(upperBound), frame5);

		applyMask(frame5, frame, result);
		applyMask(skin, result, result, 255);
		cvtColor( result , result, CV_BGR2GRAY);
		//Draw the rectangle
		Rect rect = EncontrarSonrisa( result );
		rectangle( frame, rect , cvScalar(0,0,255));

		// Draw the lower and upper bars
		dibujarInterface(frame);

		//Show the frame
		imshow("Result", frame);
		cvWaitKey();
	}
	return 0;
}
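
getSkin(), dibujarInterface(), EncontrarSonrisa() and applyMask() are project helpers not included above. applyMask() is called both with and without a fourth argument, so it plausibly takes a mask level with a default; one way to sketch it:

// Sketch, not the project's code: copies src pixels where mask == value.
void applyMask(const Mat& mask, const Mat& src, Mat& dst, int value = 255)
{
    Mat out = Mat::zeros(src.size(), src.type());
    src.copyTo(out, mask == value);   // the temporary lets src and dst alias safely
    dst = out;
}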
int main(int argc, char* argv[])
{
	//if we would like to calibrate our filter values, set to true.
	bool calibrationMode = true;
	
	//Matrix to store each frame of the webcam feed
	Mat cameraFeed;
	Mat threshold;
	Mat filteredImage;

	if(calibrationMode){
		//create slider bars for HSV filtering
		createTrackbars();
	}
	//video capture object to acquire webcam feed
	VideoCapture capture;
	//open capture object at location one (use 0 for the default webcam)
	capture.open(1);
	//set height and width of capture frame
	capture.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
	//start an infinite loop where webcam feed is copied to cameraFeed matrix
	//all of our operations will be performed within this loop
	while(1){
		//store image to matrix
		capture.read(cameraFeed);
		// flip(cameraFeed,cameraFeed,1); //flip camera
		filteredImage = cameraFeed.clone();
		filteredImage = filterRed(filteredImage);
		
		//convert frame from BGR to HSV colorspace
		// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);

		if(calibrationMode==true){
			//if in calibration mode, we track objects based on the HSV slider values.
			// cvtColor(cameraFeed,HSV,COLOR_BGR2HSV);
			inRange(filteredImage,Scalar(254,254,254),Scalar(255,255,255),threshold);
			morphOps(threshold);
			imshow(windowName2,threshold);
			trackFilteredObject(threshold,filteredImage,cameraFeed);
		}

		//show frames (threshold is already shown inside the calibration branch)

		imshow(windowName,cameraFeed);
		imshow(windowName1,filteredImage);


		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		waitKey(30);
	}

	return 0;
}
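
createTrackbars(), filterRed(), morphOps() and trackFilteredObject() belong to the surrounding project. morphOps() is conventionally an erode/dilate pass that removes speckle noise from the thresholded image; a sketch of that convention, not necessarily the project's version:

void morphOps(Mat &thresh)
{
    // a small rectangle erodes away single-pixel noise, a larger one re-dilates the blobs
    Mat erodeElement  = getStructuringElement(MORPH_RECT, Size(3, 3));
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresh, thresh, erodeElement);
    erode(thresh, thresh, erodeElement);
    dilate(thresh, thresh, dilateElement);
    dilate(thresh, thresh, dilateElement);
}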
int video_homography(int ac, char ** av)
{

    if (ac != 2)
    {
        help(av);
        return 1;
    }

    BriefDescriptorExtractor brief(32);

    VideoCapture capture;
    capture.open(atoi(av[1]));
    if (!capture.isOpened())
    {
        help(av);
        cout << "capture device " << atoi(av[1]) << " failed to open!" << endl;
        return 1;
    }

    cout << "following keys do stuff:" << endl;
    cout << "t : grabs a reference frame to match against" << endl;
    cout << "l : makes the reference frame new every frame" << endl;
    cout << "q or escape: quit" << endl;

    Mat frame;

    vector<DMatch> matches;

    BFMatcher desc_matcher(NORM_HAMMING);

    vector<Point2f> train_pts, query_pts;
    vector<KeyPoint> train_kpts, query_kpts;
    vector<unsigned char> match_mask;

    Mat gray;

    bool ref_live = true;

    Mat train_desc, query_desc;
    const int DESIRED_FTRS = 500;
    GridAdaptedFeatureDetector detector(new FastFeatureDetector(10, true), DESIRED_FTRS, 4, 4);

    Mat H_prev = Mat::eye(3, 3, CV_32FC1);
    for (;;)
    {
        capture >> frame;
        if (frame.empty())
            break;

        cvtColor(frame, gray, COLOR_BGR2GRAY); // capture frames are BGR in OpenCV

        detector.detect(gray, query_kpts); //Find interest points

        brief.compute(gray, query_kpts, query_desc); //Compute brief descriptors at each keypoint location

        if (!train_kpts.empty())
        {

            vector<KeyPoint> test_kpts;
            warpKeypoints(H_prev.inv(), query_kpts, test_kpts);

            Mat mask = windowedMatchingMask(test_kpts, train_kpts, 25, 25);
            desc_matcher.match(query_desc, train_desc, matches, mask);
            drawKeypoints(frame, test_kpts, frame, Scalar(255, 0, 0), DrawMatchesFlags::DRAW_OVER_OUTIMG);

            matches2points(train_kpts, query_kpts, matches, train_pts, query_pts);

            if (matches.size() > 5)
            {
                Mat H = findHomography(train_pts, query_pts, RANSAC, 4, match_mask);
                if (countNonZero(Mat(match_mask)) > 15)
                {
                    H_prev = H;
                }
                else
                    resetH(H_prev);
                drawMatchesRelative(train_kpts, query_kpts, matches, frame, match_mask);
            }
            else
                resetH(H_prev);

        }
        else
        {
            H_prev = Mat::eye(3, 3, CV_32FC1);
            Mat out;
            drawKeypoints(gray, query_kpts, out);
            frame = out;
        }

        imshow("frame", frame);

        if (ref_live)
        {
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
        }
        char key = (char)waitKey(2);
        switch (key)
        {
        case 'l':
            ref_live = true;
            resetH(H_prev);
            break;
        case 't':
            ref_live = false;
            train_kpts = query_kpts;
            query_desc.copyTo(train_desc);
            resetH(H_prev);
            break;
        case 27:
        case 'q':
            return 0;
            break;
        }

    }
    return 0;
}
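
help(), warpKeypoints(), matches2points(), drawMatchesRelative() and resetH() are local helpers of this OpenCV sample. The smallest, resetH(), only needs to re-seed the homography with the identity:

// Assumed to match the sample's behaviour.
void resetH(Mat& H)
{
    H = Mat::eye(3, 3, CV_32FC1);
}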
Example #11
int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, keys);
    parser.about("Use this script to run object detection deep learning networks using OpenCV.");
    if (argc == 1 || parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }

    confThreshold = parser.get<float>("thr");
    float scale = parser.get<float>("scale");
    Scalar mean = parser.get<Scalar>("mean");
    bool swapRB = parser.get<bool>("rgb");
    int inpWidth = parser.get<int>("width");
    int inpHeight = parser.get<int>("height");

    // Open file with classes names.
    if (parser.has("classes"))
    {
        std::string file = parser.get<String>("classes");
        std::ifstream ifs(file.c_str());
        if (!ifs.is_open())
            CV_Error(Error::StsError, "File " + file + " not found");
        std::string line;
        while (std::getline(ifs, line))
        {
            classes.push_back(line);
        }
    }

    // Load a model.
    CV_Assert(parser.has("model"));
    Net net = readNet(parser.get<String>("model"), parser.get<String>("config"), parser.get<String>("framework"));
    net.setPreferableBackend(parser.get<int>("backend"));
    net.setPreferableTarget(parser.get<int>("target"));

    // Create a window
    static const std::string kWinName = "Deep learning object detection in OpenCV";
    namedWindow(kWinName, WINDOW_NORMAL);
    int initialConf = (int)(confThreshold * 100);
    createTrackbar("Confidence threshold, %", kWinName, &initialConf, 99, callback);

    // Open a video file or an image file or a camera stream.
    VideoCapture cap;
    if (parser.has("input"))
        cap.open(parser.get<String>("input"));
    else
        cap.open(0);

    // Process frames.
    Mat frame, blob;
    while (waitKey(1) < 0)
    {
        cap >> frame;
        if (frame.empty())
        {
            waitKey();
            break;
        }

        // Create a 4D blob from a frame.
        Size inpSize(inpWidth > 0 ? inpWidth : frame.cols,
                     inpHeight > 0 ? inpHeight : frame.rows);
        blobFromImage(frame, blob, scale, inpSize, mean, swapRB, false);

        // Run a model.
        net.setInput(blob);
        if (net.getLayer(0)->outputNameToIndex("im_info") != -1)  // Faster-RCNN or R-FCN
        {
            resize(frame, frame, inpSize);
            Mat imInfo = (Mat_<float>(1, 3) << inpSize.height, inpSize.width, 1.6f);
            net.setInput(imInfo, "im_info");
        }
        Mat out = net.forward();

        postprocess(frame, out, net);

        // Put efficiency information.
        std::vector<double> layersTimes;
        double freq = getTickFrequency() / 1000;
        double t = net.getPerfProfile(layersTimes) / freq;
        std::string label = format("Inference time: %.2f ms", t);
        putText(frame, label, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0));

        imshow(kWinName, frame);
    }
    return 0;
}
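
postprocess() and callback() come from the full object-detection sample. The trackbar callback only has to map the integer slider position back onto the float threshold; a sketch:

// Sketch: keep confThreshold in sync with the 0..99 trackbar.
void callback(int pos, void* /*userdata*/)
{
    confThreshold = pos * 0.01f;
}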
Example #12
int main(int argc, char ** argv){

    VideoCapture cap;


    if(argc > 1){
        cout << argv[1] << endl;
        cap.open(argv[1]);

    }else{
        cap.open(0);
        cap.set(CV_CAP_PROP_FRAME_WIDTH, 640.);
        cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480.);
    }

    if(!cap.isOpened())
    {
        cerr << "Cant open video" << endl;
        return EXIT_FAILURE;
    }
    Mat frame, gray, frame_prev, gray_prev;

    cap >> frame;

//    myloader loader("input_data.yaml",frame);

//    loader.parseConfig();

    w.fps = 0;
    w.new_frame = true;
    Mat h;
//    bool enough = false;

    int maxCorners = 300;
    int minCorners = 45;
    vector<Point2f> corners, corners_prev, corners_init;

    vector<vector<Point2f>> correspondences(2, vector<Point2f>()); // two lists: initial points and their current matches


    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    Size subPixWinSize(10,10), winSize(31,31);

//    thread t1(Thread);

    namedWindow(WIN1, WINDOW_NORMAL);
    setMouseCallback(WIN1, CallBackFunc, NULL);


    Mat out;
    bool init = true, drawPoints = true;

    Size boxSize = {200,200};
    Rect box;

    box += boxSize;
    box += Point(1920/2, 1080/2);

    while(!DispThreadDone)
    {
        cap >> frame;

        if(frame.empty())
            break;

        cvtColor(frame, gray, CV_BGR2GRAY);

        Mat gray_blend;
        if(gray_prev.empty()){
            frame.copyTo(gray_blend);
        }
        else{
            frame.copyTo(gray_blend);
//            addWeighted(gray, 0.5, gray_prev, 0.5,0.0, gray_blend);
        }

        if (gray_blend.type()==CV_8UC1) {
                      //input image is grayscale
            cvtColor(gray_blend, out, CV_GRAY2RGB);

        } else {
            gray_blend.copyTo(out);
        }

        if(tracked_points.size() < 4){
            while(tracked_points.size() != 4){
                waitKey(10);

                for(Point p : tracked_points){
                    circle(out, p, 2,Scalar(255,0,0));
                }
                imshow(WIN1, out);
            }

//            for(int i = 0; i < tracked_points.size(); i ++){
//                tracked_points[i].y -= 300;
//            }
        }

        if(init){

            goodFeaturesToTrack(gray, corners, maxCorners, 0.01, 10.0, Mat(), 3,0, 0.04);

            if(corners.empty())
                continue;

            cornerSubPix(gray, corners, subPixWinSize, Size(-1,-1), termcrit);
            init = false;

            corners_prev.clear();
            corners_init = corners;
        }

        if(!corners_prev.empty())
        {
//            vector<Point2f> chessboardCorners;
//            findChessboardCorners(gray, Size(9,6), chessboardCorners, CV_CALIB_CB_ADAPTIVE_THRESH);

//            for(Point2f p : chessboardCorners){
//                circle(out, p, 3, Scalar(120,255,0), 2);
//            }
            vector<uchar> status;
            vector<float> err;

            calcOpticalFlowPyrLK(gray_prev, gray, corners_prev, corners, status, err, winSize, 3, termcrit, 0, 0.001);

            size_t i,k;

            double move_x = 0.;
            double move_y = 0.;

            int moves_counter = 0;

            correspondences[0].clear();correspondences[1].clear();
                for(i = k = 0; i < status.size(); ++i){
                    if(!status[i])
                        continue;


                    correspondences[0].push_back(corners_init[i]);
                    correspondences[1].push_back(corners[i]);
    //                corners[k++] = corners[i];
    //                corners_init[k++] = corners_init[i];

    //                circle(gray, corners[i], 3, PINK, -1, 8);
    //                circle(gray_prev, corners_prev[i], 3, GREEN, -1, 8);


                    Point p,q;
                    p.x = (int) corners_init[i].x;
                    p.y = (int) corners_init[i].y;
                    q.x = (int) corners_prev[i].x;
                    q.y = (int) corners_prev[i].y;

                    double angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
                    double hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

                    /* Redraw the motion vector from p along the measured angle. */
                    q.x = (int) (p.x - hypotenuse * cos(angle));
                    q.y = (int) (p.y - hypotenuse * sin(angle));

                    if(drawPoints){
                        circle(out, corners_init[i], 3, BLUE, -1, 8);
                        circle(out, corners[i], 3, GREEN, -1, 8);
                        circle(out, corners_prev[i], 3, GREEN, -1, 8);

                        line(out, p, q, RED, 1, CV_AA, 0 );
                        p.x = (int) (q.x + 9 * cos(angle + pi / 4));
                        p.y = (int) (q.y + 9 * sin(angle + pi / 4));
                        line( out, p, q, RED, 1, CV_AA, 0 );
                        p.x = (int) (q.x + 9 * cos(angle - pi / 4));
                        p.y = (int) (q.y + 9 * sin(angle - pi / 4));
                        line( out, p, q, RED, 1, CV_AA, 0 );
                    }




                }

            if(correspondences[0].size() > 4){
                h = findHomography(correspondences[0], correspondences[1]);

                vector<Point2f> new_points(4);
                perspectiveTransform(tracked_points, new_points, h);



                line(out, new_points[0], new_points[1], Scalar(0,255,0));
                line(out, new_points[1], new_points[2], Scalar(0,255,0));
                line(out, new_points[2], new_points[3], Scalar(0,255,0));
                line(out, new_points[3], new_points[0], Scalar(0,255,0));

                draw_ar(out, new_points);
            }



//            corners_prev.resize(k);
            if(corners_prev.size() < minCorners){
                init = true;
            }


        }



        imshow(WIN1,out);
//        imshow("Actual Frame", gray);
//        if(!gray_prev.empty())
//            imshow("Previous Frame", gray_prev);

        std::swap(corners_prev,corners);
        cv::swap(gray_prev, gray);



        char c = cvWaitKey(10);
        switch (c){
            case 'q':
                MainThreadDone = true; //finish him
                DispThreadDone = true;
            break;
            case 'r':
                init = true;
            break;
            case ' ':
                drawPoints = !drawPoints;
            break;
            case 's':
                cout << "H: " << h << endl;
                waitKey();
            break;
        }

//        t1.join();

    }

    return EXIT_SUCCESS;

}
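
WIN1, pi, square(), the color constants, tracked_points, the thread flags, w, CallBackFunc(), draw_ar() and Thread() are all defined outside this snippet. Plausible declarations for the simple ones, inferred from how they are used; every value here is a guess:

const double pi = 3.14159265358979323846;
static double square(int a) { return (double)a * a; }
const Scalar RED(0,0,255), GREEN(0,255,0), BLUE(255,0,0), PINK(147,20,255);
const char* WIN1 = "tracking";                        // window name is a guess
vector<Point2f> tracked_points;                       // filled by the mouse callback
bool MainThreadDone = false, DispThreadDone = false;  // shutdown flags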
Example #13
int main(int argc, char** argv) {

    //Check arguments
    //***

    int set=trainData();

    //If set=0, proceed
    if(set==0) {


        //Take the two video inputs and measure the similarity
        float firstTF[1000];//***
        float secondTF[1000];
        int n_frames[1000];

        //////////////////////////////////////////////////////////////////////////////////////////////////
        Mat dicty;
        FileStorage fs("full_dictionary.yml", FileStorage::READ);
        fs["vocabulary"] >> dicty;
        fs.release();

        //set dictionary
        int dict_size=100;//***
        //create a nearest neighbor matcher
        Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
        //create Sift feature point extracter
        Ptr<FeatureDetector> detector(new SiftFeatureDetector());
        //create Sift descriptor extractor
        Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
        //create BoF (or BoW) descriptor extractor
        BOWImgDescriptorExtractor bowDE(extractor,matcher);
        //Set the dictionary with the vocabulary we created in the first step
        bowDE.setVocabulary(dicty);

//////////////////////////////First Video//////////////////////////////////////////////////////////

        ofstream myfile;
        myfile.open ("first_video.txt");
        myfile << "Calculating TF_VECTORS.\n";

        //initialize capture
        VideoCapture cap;
        cap.open(argv[1]); //***

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        int jump=count/N; //extract 10 frames from the video ***
        int j=0;
        if(count<10) {
            jump=1;
        }
        int cnt=jump;
        myfile<<"Reading Video";
        Mat features;
        Mat desc;

        int u=0;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc.push_back(bowDescriptor);

            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }
        //FileStorage fs("descriptor.yml", FileStorage::WRITE);
        //fs << "descriptor" << desc;
        //fs.release();



        for(int k=0; k<desc.cols; k++) {
            int tf=0;
            for(int l=0; l<desc.rows; l++) {
                if(desc.at<float>(l,k)>0) {
                    //cout<<bowDescriptor.at<float>(i,j)<<endl;
                    tf++;
                }
            }
            myfile<<"Term Frequency:"<<tf<<"\n";
            firstTF[k]=tf;
        }



        myfile<<"TF done";
        myfile.close();

//////////////////////////////Second Video//////////////////////////////////////////////////////////

        ofstream myfile3;
        myfile3.open ("second_video.txt");
        myfile3 << "Calculating IDF_VECTORS.\n";

        //initialize capture
        cap.open(argv[2]); //***

        count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        jump=count/N; //extract 10 frames from the video ***
        j=0;
        if(count<10) {
            jump=1;
        }
        cnt=jump;
        myfile3<<"Reading Video";
        Mat desc2;
        u=0;
        while(u<10) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc2.push_back(bowDescriptor);
            cnt+=jump;
            j++;
            u++;
            ///next frame for the same video
        }


        for(int k=0; k<desc2.cols; k++) {
            int tf=0;
            for(int l=0; l<desc2.rows; l++) {
                if(desc2.at<float>(l,k)>0) {
                    //cout<<bowDescriptor.at<float>(i,j)<<endl;
                    tf++;
                }
            }
            myfile3<<"Term Frequency:"<<tf<<"\n";
            secondTF[k]=tf;
        }

        myfile3<<"TF done";
        myfile3.close();

//////////////////////////////////////////////////////////////////////////////////////////////////////////

        //Display the similarity score

        //Dot product of TF vectors


        float similarity=0;
        ofstream my3;
        my3.open("Similarity.txt");

        for(int i=0; i<dict_size; i++) {
            similarity+=firstTF[i]*secondTF[i];

        }
        my3<<"\n";
        my3<<similarity<<" ";
        my3.close();

        cout<<"Similarity Score:"<<similarity<<endl;

    }
    return 0;
}
Example #14
void idf_vector(Mat full_dictionary) {
    ofstream myfile;
    myfile.open ("example.txt");
    myfile << "Calculating IDF_VECTORS.\n";

    std::string videoName = "";

    int n_frames[100];
    //create dictionary
    int dict_size=100;//***


    //create a nearest neighbor matcher
    Ptr<DescriptorMatcher> matcher(new FlannBasedMatcher);
    //create Sift feature point extracter
    Ptr<FeatureDetector> detector(new SiftFeatureDetector());
    //create Sift descriptor extractor
    Ptr<DescriptorExtractor> extractor(new SiftDescriptorExtractor);
    //create BoF (or BoW) descriptor extractor
    BOWImgDescriptorExtractor bowDE(extractor,matcher);
    //Set the dictionary with the vocabulary we created in the first step
    bowDE.setVocabulary(full_dictionary);

    for(int i=1; i<no_videos; i++) {

        stringstream temp;
        temp<<i;
        std::string no=temp.str();
        videoName="C:/Rasika/video_"+no+".avi"; //*** path can be changed

        //initialize capture
        VideoCapture cap;
        cap.open(videoName);

        double count = cap.get(CV_CAP_PROP_FRAME_COUNT); //get the frame count

        int jump=count/N; //extract 10 frames from the video ***
        int j=0;
        int cnt=0;
        myfile<<"Reading Video";
        Mat features;
        Mat desc;
        while(cnt<count) {

            //Create matrix to store video frame
            Mat image;
            cap.set(CV_CAP_PROP_POS_FRAMES,cnt); //Set index to jump for particular count
            bool success = cap.read(image);
            if (!success) {
                cout << "Cannot read  frame " << endl;
                break;
            }

            ///////////Convert to gray scale/////////////
            Mat gray_image;
            cvtColor( image, gray_image, CV_BGR2GRAY );
            imagesData++;//Number of images in the database

            //To store the keypoints that will be extracted by SIFT
            vector<KeyPoint> keypoints;
            //Detect SIFT keypoints (or feature points)
            detector->detect(gray_image,keypoints);
            //To store the BoW (or BoF) representation of the image
            Mat bowDescriptor;
            //extract BoW (or BoF) descriptor from given image
            bowDE.compute(gray_image,keypoints,bowDescriptor);

            desc.push_back(bowDescriptor);

            ////////////////
            //delay 33ms //***
            //waitKey(33);

            cnt+=jump;
            j++;

            ///next frame for the same video
        }



        /*myfile<<desc.rows<<endl;
        myfile<<desc.cols<<endl;

        int tf=0;
        for(int i=0;i<desc.rows;i++){
        	for(int j=0;j<desc.cols;j++){
        		if(desc.at<float>(i,j)>0){

        			//cout<<bowDescriptor.at<float>(i,j)<<endl;
        			tf++;
        		}
        	}
        }

        myfile<<"Term Frequency:"<<tf<<"\n";
        float idf=0;
        float logcal=count/tf;
        idf=log(logcal);
        myfile<<"IDF:"<<idf<<"\n";
        idfVector[i-1][j]=idf;

        myfile<<idfVector[i-1][j];*/

        //store number of frames per video
        n_frames[i-1]=j;


    }
    myfile<<"IDF done";
    myfile.close();



}
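The commented-out block above hints at the intended IDF step. A minimal sketch of the standard formulation, assuming desc stacks one dict_size-wide BoW histogram row per frame: idf(term) = log(total frames / frames containing the term).

#include <cmath>
#include <vector>
#include <opencv2/core/core.hpp>

// Hypothetical helper: per-term IDF from a frames-by-terms BoW matrix.
std::vector<float> computeIdf(const cv::Mat& desc) {
    std::vector<float> idf(desc.cols, 0.f);
    for (int j = 0; j < desc.cols; j++) {
        int df = 0; // number of frames in which term j occurs
        for (int i = 0; i < desc.rows; i++)
            if (desc.at<float>(i, j) > 0) df++;
        if (df > 0)
            idf[j] = std::log((float)desc.rows / (float)df);
    }
    return idf;
}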
Example #15
0
int main( int argc, char** argv )
{
    Size boardSize, imageSize;
    float squareSize = 1.f, aspectRatio = 1.f;
    Mat cameraMatrix, distCoeffs;
    const char* outputFilename = "out_camera_data.yml";
    const char* inputFilename = 0;

    int i, nframes = 10;
    bool writeExtrinsics = false, writePoints = false;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;
    bool flipVertical = false;
    bool showUndistorted = false;
    bool videofile = false;
    int delay = 1000;
    clock_t prevTimestamp = 0;
    int mode = DETECTION;
    int cameraId = 0;
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;

    if( argc < 2 )
    {
        help();
        return 0;
    }

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&boardSize.width ) != 1 || boardSize.width <= 0 )
                return fprintf( stderr, "Invalid board width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&boardSize.height ) != 1 || boardSize.height <= 0 )
                return fprintf( stderr, "Invalid board height\n" ), -1;
        }
        else if( strcmp( s, "-pt" ) == 0 )
        {
            i++;
            if( !strcmp( argv[i], "circles" ) )
                pattern = CIRCLES_GRID;
            else if( !strcmp( argv[i], "acircles" ) )
                pattern = ASYMMETRIC_CIRCLES_GRID;
            else if( !strcmp( argv[i], "chessboard" ) )
                pattern = CHESSBOARD;
            else
                return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
        }
        else if( strcmp( s, "-s" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
                return fprintf( stderr, "Invalid board square width\n" ), -1;
        }
        else if( strcmp( s, "-n" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&nframes ) != 1 || nframes <= 3 )
                return printf("Invalid number of images\n" ), -1;
        }
        else if( strcmp( s, "-a" ) == 0 )
        {
            if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
                return printf("Invalid aspect ratio\n" ), -1;
            flags |= CV_CALIB_FIX_ASPECT_RATIO;
        }
        else if( strcmp( s, "-d" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", (unsigned int*)&delay ) != 1 || delay <= 0 )
                return printf("Invalid delay\n" ), -1;
        }
        else if( strcmp( s, "-op" ) == 0 )
        {
            writePoints = true;
        }
        else if( strcmp( s, "-oe" ) == 0 )
        {
            writeExtrinsics = true;
        }
        else if( strcmp( s, "-zt" ) == 0 )
        {
            flags |= CV_CALIB_ZERO_TANGENT_DIST;
        }
        else if( strcmp( s, "-p" ) == 0 )
        {
            flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
        }
        else if( strcmp( s, "-v" ) == 0 )
        {
            flipVertical = true;
        }
        else if( strcmp( s, "-V" ) == 0 )
        {
            videofile = true;
        }
        else if( strcmp( s, "-o" ) == 0 )
        {
            outputFilename = argv[++i];
        }
        else if( strcmp( s, "-su" ) == 0 )
        {
            showUndistorted = true;
        }
        else if( s[0] != '-' )
        {
            if( isdigit(s[0]) )
                sscanf(s, "%d", &cameraId);
            else
                inputFilename = s;
        }
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    if( inputFilename )
    {
        if( !videofile && readStringList(inputFilename, imageList) )
            mode = CAPTURING;
        else
            capture.open(inputFilename);
    }
    else
        capture.open(cameraId);

    if( !capture.isOpened() && imageList.empty() )
        return fprintf( stderr, "Could not initialize video (%d) capture\n",cameraId ), -2;

    if( !imageList.empty() )
        nframes = (int)imageList.size();

    if( capture.isOpened() )
        printf( "%s", liveCaptureHelp );

    namedWindow( "Image View", 1 );

    for(i = 0;;i++)
    {
        Mat view, viewGray;
        bool blink = false;

        if( capture.isOpened() )
        {
            Mat view0;
            capture >> view0;
            view0.copyTo(view);
        }
        else if( i < (int)imageList.size() )
Example #16
0
int main (int argc, char * const argv[])
{	
	
	
	string WINDOW_NAME = "Object Tracker"; 
	
	Size patchSize = Size(30,30); 
	
	string GENTLEBOOST_FILE= "data/GenkiSZSLCascade.txt"; 
	
	Size minSearchSize(0,0); 
	Size maxSearchSize(0,0); 
	int numFeaturesToUse = -1; //-1 means "All"
	int NMSRadius = 15; //15; 
	double patchThresh = -INFINITY;  //-INFINITY means "All", 0 means "p>.5"
	int maxObjects = 20; 
	int skipFrames = 0; 
	int useFast = 1; 
	
	Size imSize(320,240); 
	int key=0;
	
	/* Open capture */ 
	VideoCapture capture; 
	int usingCamera = NMPTUtils::getVideoCaptureFromCommandLineArgs(capture, argc, (const char**) argv); 
	if (!usingCamera--) return 0;  
	
	/* Set capture to desired width/height */ 
	if (usingCamera) {
		capture.set(CV_CAP_PROP_FRAME_WIDTH, imSize.width); 
		capture.set(CV_CAP_PROP_FRAME_HEIGHT, imSize.height); 
	}

	
    namedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE); //Create the graphical algorithm display
    Mat current_frame, color_image, gray_image;
	
	BlockTimer bt; 
		
	GentleBoostCascadedClassifier* booster = new GentleBoostCascadedClassifier(); 
	
	ifstream in; 
	in.open(GENTLEBOOST_FILE.c_str()); 
	in >> booster; 
	in.close(); 
	booster->setSearchParams(useFast, minSearchSize, maxSearchSize,1.2, 1, 1); 
	booster->setNumFeaturesUsed(numFeaturesToUse); 
	
	int i = 0; 
    while (key != 'q' && key != 'Q') //Loop until user enters 'q'
    {
		
		//cout<< "Getting camera frame " << endl; 
		capture >> current_frame; 
		if (i++%(skipFrames+1) > 0 && !usingCamera) continue; 
		
		current_frame.copyTo(color_image); 
		cvtColor(current_frame, gray_image, CV_RGB2GRAY); 
				
		vector<SearchResult> boxes; 
		
		bt.blockRestart(2); 
		Mat img = gray_image; 
		booster->searchImage(img, boxes, NMSRadius, patchThresh); 
		cout << "Image Search Time was " << bt.getCurrTime(2)<< endl; 		
		
		if (boxes.size() > (unsigned int) maxObjects) {
			boxes.resize(maxObjects); 
		} 
		
		for (size_t i = 0; i < boxes.size(); i++) {
			Rect imgloc = boxes[i].imageLocation; 
			Point center = Point(imgloc.x + imgloc.width/2.0, imgloc.y + imgloc.height/2.0); 
			Scalar color; 
			if (boxes[i].value > 0 ) 
				color = (i== 0) ? Scalar(0,0,255): Scalar(0,255,255); 
			else color = Scalar(0,0,0); 
			circle(color_image, center, imgloc.width/2.0, color, 3); 
			circle(color_image, center, 2, color, 3); 
		}
		
		imshow(WINDOW_NAME, color_image);
		
		key = cvWaitKey(5);
	} 
	
	return 0;
}
Example #17
0
//this is a sample for foreground detection functions
int main(int argc, const char** argv)
{
	help();

	CommandLineParser parser(argc, argv, keys);
	bool useCamera = parser.get<bool>("camera");
	string file = parser.get<string>("file_name");
    VideoCapture cap;
    bool update_mog2 = true;

    if( useCamera )
        cap.open(0);
    else
		cap.open(file.c_str());
	parser.printParams();

    if( !cap.isOpened() )
    {
        printf("can not open camera or video file\n");
        return -1;
    }
    
    namedWindow("image", CV_WINDOW_NORMAL);
    namedWindow("foreground mask", CV_WINDOW_NORMAL);
    namedWindow("foreground image", CV_WINDOW_NORMAL);
    namedWindow("mean background image", CV_WINDOW_NORMAL);

    BackgroundSubtractorMOG2 mog2(500, 16, false);
	/*initModule_video();
	Ptr<BackgroundSubtractor> mog2 = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
                                     
	FileStorage fs("mog2.xml", FileStorage::WRITE);
	if (fs.isOpened()) {
		mog2->write(fs);
	} else {
		printf("Cannot open mog2 params file\n");
	}
*/

	printf("history: %d\n", mog2.history);
	printf("varThreshold: %f\n", mog2.varThreshold);
	printf("nmixtures: %d\n", mog2.nmixtures);
	//mog2.backgroundRatio = 0.1;
	printf("backgroundRatio: %f\n", mog2.backgroundRatio);
	mog2.varThresholdGen = 16;
	printf("varThresholdGen: %f\n", mog2.varThresholdGen);
	printf("fVarInit: %f\n", mog2.fVarInit);
	printf("fVarMax: %f\n", mog2.fVarMin);
	printf("fVarMin: %f\n", mog2.fVarMin);
	printf("fCT: %f\n", mog2.fCT);
	printf("nShadowDetection: %d\n", mog2.nShadowDetection);
	printf("fTau: %f\n", mog2.fTau);
	Mat img, fgmask, fgimg;

    for(;;)
    {
        cap >> img;
        
        if( img.empty() )
            break;
        
        //cvtColor(_img, img, COLOR_BGR2GRAY);
        
        if( fgimg.empty() )
          fgimg.create(img.size(), img.type());

        //update the model
        mog2(img, fgmask, update_mog2 ? -1 : 0);

        fgimg = Scalar::all(0);
        img.copyTo(fgimg, fgmask);

        Mat bgimg;
        mog2.getBackgroundImage(bgimg);

        imshow("image", img);
        imshow("foreground mask", fgmask);
        imshow("foreground image", fgimg);
        if(!bgimg.empty())
          imshow("mean background image", bgimg );

        char k = (char)waitKey(30);
        if( k == 27 ) break;
        if( k == ' ' )
        {
            update_mog2 = !update_mog2;
            if(update_mog2)
            	printf("Background update is on\n");
            else
            	printf("Background update is off\n");
        }
    }
    return 0;
}
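The third argument to the MOG2 operator() above is the learning rate: -1 lets OpenCV choose a rate automatically, 0 freezes the background model (which is what the space-bar toggle exploits), and 1 reinitializes the model from the current frame. A minimal sketch of the three modes:

// Learning-rate semantics for BackgroundSubtractorMOG2::operator():
mog2(img, fgmask, -1.0); // automatic rate: the model keeps adapting
mog2(img, fgmask,  0.0); // model frozen: fgmask is still computed
mog2(img, fgmask,  1.0); // model rebuilt from this frame alone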
Example #18
0
int main( int argc, const char** argv )
{
    help();
	//-------------------load svm params from xml files-----------------------------
	CvSVM handsvm;
	handsvm.load("svmdata.xml");
	Mat trace(320,320,CV_8UC1);  //the rectangle area for svm classification
    VideoCapture cap;
    Rect trackWindow;
    int hsize = 16;
    float hranges[] = {0,180};
    const float* phranges = hranges;
    CommandLineParser parser(argc, argv, keys);
    int camNum = parser.get<int>("1");

    cap.open(camNum);

    if( !cap.isOpened() )
    {
        help();
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        parser.printParams();
        return -1;
    }

    namedWindow( "Histogram", 0 );
    namedWindow( "CamShift Demo", 0 );
	namedWindow("hand area",0);
    setMouseCallback( "CamShift Demo", onMouse, 0 );
    createTrackbar( "Vmin", "CamShift Demo", &vmin, 256, 0 );
    createTrackbar( "Vmax", "CamShift Demo", &vmax, 256, 0 );
    createTrackbar( "Smin", "CamShift Demo", &smin, 256, 0 );

    Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(320, 320, CV_8UC3), backproj;
    bool paused = false;

    for(;;)
    {
        if( !paused )
        {
            cap >> frame;
            if( frame.empty() )
                break;
        }

        frame.copyTo(image);

        if( !paused )
        {
            cvtColor(image, hsv, CV_BGR2HSV);

            if( trackObject )
            {
                int _vmin = vmin, _vmax = vmax;

                inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)),
                        Scalar(180, 256, MAX(_vmin, _vmax)), mask);
                int ch[] = {0, 0};
                hue.create(hsv.size(), hsv.depth());
                mixChannels(&hsv, 1, &hue, 1, ch, 1);

                if( trackObject < 0 )
                {
                    Mat roi(hue, selection), maskroi(mask, selection);
                    calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
                    normalize(hist, hist, 0, 255, CV_MINMAX);

                    trackWindow = selection;
                    trackObject = 1;

                    histimg = Scalar::all(0);
                    int binW = histimg.cols / hsize;
                    Mat buf(1, hsize, CV_8UC3);
                    for( int i = 0; i < hsize; i++ )
                        buf.at<Vec3b>(i) = Vec3b(saturate_cast<uchar>(i*180./hsize), 255, 255);
                    cvtColor(buf, buf, CV_HSV2BGR);

                    for( int i = 0; i < hsize; i++ )
                    {
                        int val = saturate_cast<int>(hist.at<float>(i)*histimg.rows/255);
                        rectangle( histimg, Point(i*binW,histimg.rows),
                                   Point((i+1)*binW,histimg.rows - val),
                                   Scalar(buf.at<Vec3b>(i)), -1, 8 );
                    }
                }

                calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
                backproj &= mask;
				
                RotatedRect trackBox = CamShift(backproj, trackWindow,
                                    TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
                if( trackWindow.area() <= 1 )
                {
                    int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
                    trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
                                       trackWindow.x + r, trackWindow.y + r) &
                                  Rect(0, 0, cols, rows);
                }

                if( backprojMode )
                    cvtColor( backproj, image, CV_GRAY2BGR );
               
//-------------------------------svm classify the image-----------------------------------------------
				Mat cs(trace.size(),CV_8UC1);
				Mat cs1,cs2;
				//IplImage image0;
				//CvtColor(image, image0, CV_RGB2GRAY);
                //cvReleaseImage(image);
				cvtColor(image, cs1, CV_RGB2GRAY);
				cs1(trackWindow).copyTo(cs2);
				resize(cs2,cs,trace.size());
				
			    imshow("hand area",cs);
				
       
				Mat cs0 = cs.reshape(1, 1) + 0;
		        
		        cs0.convertTo(cs0,CV_32FC1);
				cout<<"the size of hue"<<cs.channels()<<cs.size()<<endl;
		        int result=handsvm.predict(cs0);
		        cout<<"the hand on"<<result<<endl;
//--------------------------mark the hand area with ellipse-----------------------------------------------
				 ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
            }
        }
        else if( trackObject < 0 )
            paused = false;
		
        if( selectObject && selection.width > 0 && selection.height > 0 )
        {
            Mat roi(image, selection);
			
            bitwise_not(roi, roi);
        }
		
		
		
		

        imshow( "CamShift Demo", image );
        imshow( "Histogram", histimg );

        char c = (char)waitKey(10);
        if( c == 27 )
            break;
        switch(c)
        {
        case 'b':
            backprojMode = !backprojMode;
            break;
        case 'c':
            trackObject = 0;
            histimg = Scalar::all(0);
            break;
        case 'h':
            showHist = !showHist;
            if( !showHist )
                destroyWindow( "Histogram" );
            else
                namedWindow( "Histogram", 1 );
            break;
        case 'p':
            paused = !paused;
            break;
        default:
            ;
        }
    }

    return 0;
}
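CvSVM::predict expects a single CV_32F row laid out exactly like the rows used at training time, which is why the example flattens the resized hand patch before classifying. A minimal sketch of that pattern, assuming a single-channel patch the same size as the training images:

Mat patch(320, 320, CV_8UC1);       // hand patch, resized as above
Mat sample = patch.reshape(1, 1);   // one row, rows*cols columns, same data
sample.convertTo(sample, CV_32FC1); // CvSVM::predict requires float features
float label = handsvm.predict(sample);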
Example #19
0
int main(int argc, char** argv) {

  bool use_gui = false;
  double learningRate = -1;

  // time measurement
  timespec time_init;
  timespec time_now;
  timespec time_past;
  char fps[128] = ""; // large enough for the status line sprintf'd below

  clock_gettime(CLOCK_MONOTONIC, &time_init);
  clock_gettime(CLOCK_MONOTONIC, &time_now);

  // video source
  VideoCapture cap;

  if (argc > 1) {
    for (int i = 1; i < argc; i++) {
      
      // -d <deviceid>
      if (string(argv[i]) == "-d") {
        int device_id = -1;
        sscanf(argv[i+1], "%i", &device_id);
        cap.open(device_id);
        i++;

        if (cap.isOpened() != true) {
          cerr << "Error: Device " << device_id << " could not be opened.\n exiting..." << endl;
          return -1;
        }
      }

      // -f <filename>
      else if (string(argv[i]) == "-f") {
        string filename = string(argv[i+1]);
        cap.open(filename);
        i++;

        if (cap.isOpened() != true) {
          cerr << "Error: \"" << filename << "\" could not be opened.\n exiting..." << endl;
          return -1;
        }
      }
      // -g (gui)
      else if (string(argv[i]) == "-g") {
        use_gui = true;
      }

      // noise

      // learning rate
	  else if (string(argv[i]) == "-l") {
        sscanf(argv[i+1], "%lf", &learningRate);
		i++;
      }  
 
      // mode

      else {
        cerr << "Error: unknown parameter \"" << string(argv[i]) << "\"\n";
        usage();
        return -1;
      }
    }
  }
        
  if (cap.isOpened() != true) {
    cap.open(0);
  }

  if (cap.isOpened()!= true) {
    cerr << "Error: Cannot read device 0.\n exiting..." << endl;
    return -1;
  }

  Mat frame;  // the current frame
  Mat foreground, background;

  BackgroundSubtractorMOG2 bg(300, 16, false);

  std::vector<std::vector<cv::Point> > contours;

//  vector<string> detectors, detector_names;
//  detectors.push_back("/home/thomas/cv/tarantula/person.xml");
//  detector_names.push_back("person");

  if (use_gui == true) {
    namedWindow("frame", CV_WINDOW_AUTOSIZE); // current frame
//    namedWindow("foreground", CV_WINDOW_NORMAL);
    namedWindow("background", CV_WINDOW_NORMAL);
  }

//  LatentSvmDetector detector = LatentSvmDetector(detectors, detector_names);
//  vector<LatentSvmDetector::ObjectDetection> detections;
  
  cout << cap.get(CV_CAP_PROP_FRAME_WIDTH) << " x " << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
  
  cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
  cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
//  cap.set(CV_CAP_PROP_FPS, 30);
  //cap.set();

  // main loop
  for (int f=0;;f++) {
    // write time
    clock_gettime(CLOCK_MONOTONIC, &time_past);

    if (!cap.read(frame)) {
      continue;
    }

    bg.operator() (frame, foreground, learningRate);
    if (use_gui == true) bg.getBackgroundImage(background);

    erode(foreground, foreground, Mat(), Point(-1, -1), 3);
    dilate(foreground, foreground, Mat(), Point(-1, -1), 3);

	if (use_gui == true) {
      findContours(foreground, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
      drawContours(frame, contours, -1, Scalar(0,0,255), 1);
	}

    double area;
    int size = contours.size();
	vector<vector<Point> > contours_poly( contours.size() );
	vector<RotatedRect> boundRect( contours.size() ) ;

    for(int i = 0; i < size; i++) {
      area = contourArea(contours[i]);
      if (area > 2000) {
//        cout << i+1 << "/" << size << ": " << area << endl;
        if (use_gui == true) {
		  drawContours(frame, contours, i, Scalar(0,255,255), 2);
		  approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
		  boundRect[i] = minAreaRect( contours_poly[i] );
        }
      }
    }

    // show images
    if (use_gui == true) {
	
	  for( int i = 0; i< contours.size(); i++ ) {
        //ellipse( frame, boundRect[i], Scalar(255,255,255), 2, 8 );
	    circle( frame, boundRect[i].center, 6, Scalar(0, 255, 0), 3); 
      }
		
      imshow("frame", frame);
//      imshow("foreground", foreground);
      imshow("background", background);
    }
    
    // calculate fps and display
    clock_gettime(CLOCK_MONOTONIC, &time_now);
	
    sprintf(fps, "%.2f fps, frame: %i, time: %.3f s, l: %.2e", getFps(calcTimeDiff (time_past, time_now)), f, calcTimeDiff (time_init, time_now), learningRate);
    if (use_gui == true) {
	  displayOverlay("frame", fps, 0);
	}
    cout << fps << endl;

    int c = waitKey(1);
    if (c == 'q' || c == 'Q' || (c & 255) == 27) {
      break;
    }
  }

  return 0;

}
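The erode-then-dilate pair with three iterations in the loop above is a morphological opening, which removes small speckles from the foreground mask. A behaviorally equivalent sketch using morphologyEx with the same default 3x3 kernel:

// Morphological opening: 3 erosions followed by 3 dilations.
morphologyEx(foreground, foreground, MORPH_OPEN,
             Mat(),             // empty kernel = default 3x3 rectangle
             Point(-1, -1), 3); // centered anchor, 3 iterations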
Example #20
0
int main(int argc , char *argv[]) {

	VideoCapture video;
	if(argc < 2){
		cerr << "Usage: " << argv[0] << " <video file>" << endl;
		return -1;
	}
	video.open(argv[1]);
	if(!video.isOpened()){
		cerr << "Could not open " << argv[1] << endl;
		return -1;
	}

	vector<vector<Point2f> > img_points;
	vector<vector<Point3f> > obj_points;

	Mat camMatrix;
	Mat distCoeff;
	vector<Mat> rvec;
	vector<Mat> tvec;


	Mat frame;

	char start;
	bool found = false;
	bool search = false;

	Size imgsize;

	while(video.read(frame)){


		cvtColor(frame,frame , CV_BGR2GRAY);

		imgsize = frame.size();



		if(search){
			vector<Point2f> corners;
			found = findChessboard(frame , corners , Size(6,3));

			if(found){
				drawChessboardCorners(frame, Size(6,3), Mat(corners), found);

				vector<Point3f>obj;
				for(int i = 0 ; i<3 ; i++){
					for(int j = 0 ; j<6 ; j++){
					obj.push_back(Point3f(i*2.36 , j*2.36 , 0.0f));
					}
				}

				obj_points.push_back(obj);
				img_points.push_back(corners);

			}

		}

		resize(frame,frame, Size(1280/2 , 1024/2));
		//resize(right,right, Size(1280/2 , 1024/2));

		imshow("video" , frame);

		start=waitKey(50);
		if(start=='q' && search == false){
			search = true;
		}else if(start=='q' && search == true){
			search = false;
		}else if(start=='x'){
			video.release();
			break;
		}
	}

	calibrate(imgsize , obj_points , img_points , camMatrix , distCoeff , rvec , tvec);


	double fovx , fovy , focallength , aspect;
	Point2d principal;

	calibrationMatrixValues(camMatrix , Size(1280,1024) , 6.9 , 5.5 , fovx , fovy , focallength , principal , aspect );

	cout<<"fovx: "<<fovx<<" fovy: "<<fovy<<" focal: "<<focallength<<" aspect: "<<aspect<<" principal.x: "<<principal.x<<
	" principal.y: "<<principal.y<< endl;

	video = VideoCapture();
	video.open(argv[1]);

	Mat undistorted;
	char save;
	while(video.read(frame)){

		undistort(frame , undistorted , camMatrix , distCoeff);
		imshow("original" , frame);
		imshow("undistorted" , undistorted);
		save =waitKey(5);
		if(save =='s'){
			FileStorage file("right_camMatrix.yml" , FileStorage::WRITE);
			file<<"camMatrix"<<camMatrix<<"distCoeffs"<<distCoeff;
			file.release();
		}
	}

	return 0;
}
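The calibrate() helper called above is not shown in this example. A minimal sketch of what such a wrapper typically does with cv::calibrateCamera (the signature and RMS printout are assumptions):

// Hypothetical wrapper around cv::calibrateCamera.
void calibrate(Size imgsize,
               const vector<vector<Point3f> >& obj_points,
               const vector<vector<Point2f> >& img_points,
               Mat& camMatrix, Mat& distCoeff,
               vector<Mat>& rvec, vector<Mat>& tvec)
{
    camMatrix = Mat::eye(3, 3, CV_64F);
    distCoeff = Mat::zeros(8, 1, CV_64F);
    double rms = calibrateCamera(obj_points, img_points, imgsize,
                                 camMatrix, distCoeff, rvec, tvec);
    cout << "RMS reprojection error: " << rms << endl;
}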
Example #21
0
int main(int argc, char *argv[]) {
    CommandLineParser parser(argc, argv, keys);
    parser.about(about);

    if(argc < 7) {
        parser.printMessage();
        return 0;
    }

    int markersX = parser.get<int>("w");
    int markersY = parser.get<int>("h");
    float markerLength = parser.get<float>("l");
    float markerSeparation = parser.get<float>("s");
    int dictionaryId = parser.get<int>("d");
    bool showRejected = parser.has("r");
    bool refindStrategy = parser.has("rs");
    int camId = parser.get<int>("ci");


    Mat camMatrix, distCoeffs;
    if(parser.has("c")) {
        bool readOk = readCameraParameters(parser.get<string>("c"), camMatrix, distCoeffs);
        if(!readOk) {
            cerr << "Invalid camera file" << endl;
            return 0;
        }
    }

    aruco::DetectorParameters detectorParams;
    if(parser.has("dp")) {
        bool readOk = readDetectorParameters(parser.get<string>("dp"), detectorParams);
        if(!readOk) {
            cerr << "Invalid detector parameters file" << endl;
            return 0;
        }
    }
    detectorParams.doCornerRefinement = true; // do corner refinement in markers

    String video;
    if(parser.has("v")) {
        video = parser.get<String>("v");
    }

    if(!parser.check()) {
        parser.printErrors();
        return 0;
    }

    aruco::Dictionary dictionary =
        aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME(dictionaryId));

    VideoCapture inputVideo;
    int waitTime;
    if(!video.empty()) {
        inputVideo.open(video);
        waitTime = 0;
    } else {
        inputVideo.open(camId);
        waitTime = 10;
    }

    float axisLength = 0.5f * ((float)min(markersX, markersY) * (markerLength + markerSeparation) +
                               markerSeparation);

    // create board object
    aruco::GridBoard board =
        aruco::GridBoard::create(markersX, markersY, markerLength, markerSeparation, dictionary);

    double totalTime = 0;
    int totalIterations = 0;

    while(inputVideo.grab()) {
        Mat image, imageCopy;
        inputVideo.retrieve(image);

        double tick = (double)getTickCount();

        vector< int > ids;
        vector< vector< Point2f > > corners, rejected;
        Vec3d rvec, tvec;

        // detect markers
        aruco::detectMarkers(image, dictionary, corners, ids, detectorParams, rejected);

        // refind strategy to detect more markers
        if(refindStrategy)
            aruco::refineDetectedMarkers(image, board, corners, ids, rejected, camMatrix,
                                         distCoeffs);

        // estimate board pose
        int markersOfBoardDetected = 0;
        if(ids.size() > 0)
            markersOfBoardDetected =
                aruco::estimatePoseBoard(corners, ids, board, camMatrix, distCoeffs, rvec, tvec);

        double currentTime = ((double)getTickCount() - tick) / getTickFrequency();
        totalTime += currentTime;
        totalIterations++;
        if(totalIterations % 30 == 0) {
            cout << "Detection Time = " << currentTime * 1000 << " ms "
                 << "(Mean = " << 1000 * totalTime / double(totalIterations) << " ms)" << endl;
        }

        // draw results
        image.copyTo(imageCopy);
        if(ids.size() > 0) {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);
        }

        if(showRejected && rejected.size() > 0)
            aruco::drawDetectedMarkers(imageCopy, rejected, noArray(), Scalar(100, 0, 255));

        if(markersOfBoardDetected > 0)
            aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvec, tvec, axisLength);

        imshow("out", imageCopy);
        char key = (char)waitKey(waitTime);
        if(key == 27) break;
    }

    return 0;
}
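readCameraParameters() comes from the aruco sample utilities and is not shown here; a minimal sketch of the usual implementation, reading the YAML file produced by the calibration samples:

// Sketch: load intrinsics written by the calibration samples.
static bool readCameraParameters(string filename, Mat &camMatrix, Mat &distCoeffs) {
    FileStorage fs(filename, FileStorage::READ);
    if(!fs.isOpened())
        return false;
    fs["camera_matrix"] >> camMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    return true;
}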
Example #22
0
int main( int argc, char** argv )
{
    int i;
    int width = 1920, height = 1080;
    VideoCapture capture;
    int delay = 1000;
    int cameraId = 0;

    for( i = 1; i < argc; i++ )
    {
        const char* s = argv[i];
        if( strcmp( s, "-w" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &width ) != 1 || width <= 0 )
                return fprintf( stderr, "Invalid width\n" ), -1;
        }
        else if( strcmp( s, "-h" ) == 0 )
        {
            if( sscanf( argv[++i], "%u", &height ) != 1 || height <= 0 )
                return fprintf( stderr, "Invalid height\n" ), -1;
        }
        else if( strcmp( s, "-help" ) == 0 )
        {
            help();
            return 0;
        }
        else if( s[0] != '-' )
        {
            if( isdigit(s[0]) )
                sscanf(s, "%d", &cameraId);
            else
                fprintf( stderr, "Invalid Camera Id %s", s);
        }
        else
            return fprintf( stderr, "Unknown option %s", s ), -1;
    }

    capture.open(cameraId);

    if( !capture.isOpened() )
        return fprintf( stderr, "Could not initialize video (%d) capture\n",cameraId ), -2;

    capture.set(cv::CAP_PROP_FRAME_WIDTH, width);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, height);

    if( capture.isOpened() )
        printf( "%s", liveCaptureHelp );

    namedWindow( "Image View", cv::WINDOW_NORMAL );

    int wwidth = width, wheight = height;
    int vwidth = width, vheight = height;
    int top = 0, left = 0;
    resizeWindow("Image View", wwidth, wheight);

    int pindex = 0; // must persist across iterations, or every save overwrites image0000.png

    for(;;)
    {
        Rect r(left, top, vwidth, vheight);
        Mat view0;
        capture >> view0;

        Mat view(view0, r);

        imshow("Image View", view);
        int key = 0xff & waitKey(50);

        if( (key & 255) == 27 )
            break;

        switch( key ) {
        case 's':
            {
                char m[MAX_PATH];
                sprintf(m, "image%04d.png", pindex);
                string s = imwrite( m, view) ? " Saved." : " failed to save";
                printf("%s: %s\n", m, s.c_str());
                pindex++;
            }
            break;
        case '+':
            vwidth = vwidth > 20 ?  vwidth-(vwidth/10) : 10;
            vheight = vheight > 20 ?  vheight-(vheight/10) : 10;
            break;
        case '-':
            vwidth = vwidth+(wwidth/10) >= width-left ?  width-left : vwidth+(wwidth/10);
            vheight = vheight+(wheight/10) >= height-top ?  height-top: vheight+(wheight/10);
            break;
        case 'u':
            top = top-(vheight/10) <= 0 ? 0: top-(vheight/10);
            break;
        case 'd':
            top = top+(vheight/10) >= height-vheight ? height-vheight : top+(vheight/10);
            break;
        case 'l':
            left = left < vwidth/10 ? 0 : left-(vwidth/10);
            break;
        case 'r':
            left = left+(vwidth/10) >= width-vwidth ? width-vwidth : left+vwidth/10;
            break;
        case 'i':
            wwidth = wwidth-(wwidth/10) > 20 ?  wwidth-(wwidth/10) : 10;
            wheight = wheight-(wheight/10) > 20 ? wheight-(wheight/10) : 10;
            resizeWindow("Image View", wwidth, wheight);
            break;
        case 'o':
            wwidth = wwidth+(wwidth/10) >= width ?  width : wwidth+(wwidth/10);
            wheight = wheight+(wheight/10) >= height ? height: wheight+(wheight/10);
            resizeWindow("Image View", wwidth, wheight);
            break;
        }
    }

    return 0;
}
Example #23
0
int main( int argc, char** argv)
{
  if ( argc != 2 )
  {
    cout << "Usage :" << argv[0] << " <video/image filename or camera device>" << endl;
    exit(0);
  }

  cout << "Opening " << argv[1] << endl;
  capture = new Capture( argv[1]);

  char type[32];
  strcpy( type, capture->getType());
  cout << "Type :" << type << endl;

  float fps = 100;

  cvNamedWindow( "Video", 1);

  if ( !strncmp( type, "VIDEO", 5))
  {
    delete capture;
    capture = new VideoCapture( argv[1]);

    VideoCapture* temp = (VideoCapture*) capture;

    //cout << "Width : " <<  temp->getWidth() << endl;
    //cout << "Height : " <<  temp->getHeight() << endl;
    //cout << "Duration : " << temp->getDuration() << endl;
    //cout << "FPS : " << temp->getFPS() << endl;

    fps = 25;
  }
  else
  {
    fprintf( stderr, "Couldn't Not Detect Type of Source");
    fprintf( stderr, " or No Handeller Available\n");
    exit(0);
  }

  if ( !(strncmp( capture->getType(), "VIDEO", 5)))
  {
    VideoCapture* temp = (VideoCapture*) capture;
    temp->play();
    cout << capture->getStatus();
  }

  IplImage* image = NULL;

  if (( capture->getWidth() > 0) && (capture->getHeight() > 0))
  {
    image = cvCreateImageHeader( cvSize(capture->getWidth(),
          capture->getHeight()), IPL_DEPTH_8U, 3);
  }
  else
  {
    cout << capture->getStatus();
    return 0;
  }
  //Assuming height 1100 and width 2000
  vecDetections =  new vector<Detection>* [1100];
  for( int i=0; i<1100; i++)
    vecDetections[i] = new vector<Detection>[2000];


  char key=' ';
  for( int i = 0; ;)
  {
    image->imageData = (char*) capture->getNextFrame();
    if ( image->imageData == NULL) break;

    processFrameFastHog(image);

    cvShowImage( "Video", image);

    key = cvWaitKey((int) 1000.0 / fps);
    if (( key == '\n') && ( i == 0))
    {
      i = 1;
      if ( !(strncmp( capture->getType(), "VIDEO", 5)))
      {
        VideoCapture* temp = (VideoCapture*) capture;
        temp->pause();
      }
    }
    else if( key == 27) break;
  }

  fstream outfile;
  outfile.open("dump.txt", ios_base::out);

  for(int i=0; i<capture->getHeight(); i++)
    for(int j=0; j<capture->getWidth(); j++)
    {
      vector<Detection> detVec = vecDetections[i][j];
      for(int k=0; k<detVec.size(); k++)
      {
        outfile << detVec[k].y << " " << detVec[k].x << " " << detVec[k].width;
        outfile << " " << detVec[k].height << " " << detVec[k].scale << " " << detVec[k].score << endl;
      }
    }

  outfile.close();

  HOGEngine::Instance()->FinalizeHOG();
  delete capture;
  return 0;
}
Example #24
0
//! Finds Poms and goes to them based on HSV values
bool goToPom(colorRange range, void* ourBot)
{
    if (!cap.isOpened()) return false;
    vector<vector<int> > orderedContours;
    Mat source, chans, singleChan, tmpMatA, tmpMatB;
    vector< vector<Point> > contours;
    vector<Mat> hueChan(3);
    vector<int> tmpCont(3);
    int tmpInt;
    Point2f center(0.f, 0.f); // set by the sampling loop below; stays (0,0) if no pom is seen
    float radius = 0.f;
    for(int i=0; i < 10; i++)
    {
        cap >> source;
#ifdef LOG
        strcpy(dest, "pics/");
        cout << "Pic" << ++pic << endl;
        itoa(pic,picCurrent,10);
        strcat(dest, picCurrent);
        strcat(dest, ".png");
        imwrite(dest, source, compression_params);
#endif
        blur(source, source, Size(5, 5));
        cvtColor(source, chans, CV_BGR2HSV);
        split(chans, hueChan);
//Separate the channels
//Channel 1(Hue)
        if(range.getHueMin()+range.getHueRange()<180) inRange(hueChan[0], range.getHueMin(), range.getHueMin()+range.getHueRange(), singleChan);
        else
        {
            compare(hueChan[0], range.getHueMin()+range.getHueRange()-180, tmpMatB, CMP_LE);
            compare(hueChan[0], range.getHueMin(), tmpMatA, CMP_GE);
            bitwise_or(tmpMatA, tmpMatB, singleChan, Mat());
        }
        compare(singleChan, 0, singleChan, CMP_GT);
//Channel 2(Saturation)
        inRange(hueChan[1], range.getSatMin(), range.getSatMin()+range.getSatRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
//Channel 3(Value)
        inRange(hueChan[2], range.getValMin(), range.getValMin()+range.getValRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
#ifdef LOG
        strcpy(dest, "pics/");
        cout << "Pic" << pic << endl;
        strcat(dest, picCurrent);
        strcat(dest, "A.png");
        imwrite(dest, singleChan, compression_params);
#endif
//Find and sort the contours
        findContours(singleChan, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        for(unsigned int i=0; i < contours.size(); i++)
        {
            tmpInt = contourArea(contours[i]);
            //if (tmpInt < 500) continue;
            tmpCont[0]=tmpInt;
            tmpCont[2]=i;
            orderedContours.push_back(tmpCont);
        }
        if (orderedContours.size() < 1) continue;
        sort(orderedContours.begin(), orderedContours.end(), greaterArea);
//Find radius and center
        minEnclosingCircle((Mat)contours[orderedContours[0][2]], center, radius);
#ifdef DEBUG_POMS
        cout << "Center x:" << center.x << " y:" << center.y << " with radius " << radius << " and area " << orderedContours[0][0] << endl;
#endif// DEBUG_POMS
        break;
    }
    while (ABS(center.x - CENTERX) > errorX || center.y < YBARRIER)
    {
#ifdef SATAN
		cout << "Hail Satan meaningless beings" << endl;
#endif
        orderedContours.clear();
        cap >> source;
#ifdef LOG
        strcpy(dest, "pics/");
        cout << "Pic" << ++pic << endl;
        itoa(pic,picCurrent,10);
        strcat(dest, picCurrent);
        strcat(dest, ".png");
        imwrite(dest, source, compression_params);
#endif
        blur(source, source, Size(5, 5));
        cvtColor(source, chans, CV_BGR2HSV);
        split(chans, hueChan);
//Separate the channels
        if(range.getHueMin()+range.getHueRange()<180) inRange(hueChan[0], range.getHueMin(), range.getHueMin()+range.getHueRange(), singleChan);
        else
        {
            compare(hueChan[0], (range.getHueMin()+range.getHueRange())%180, tmpMatB, CMP_LE);
            compare(hueChan[0], range.getHueMin(), tmpMatA, CMP_GE);
            bitwise_or(tmpMatA, tmpMatB, singleChan, Mat());
        }
        compare(singleChan, 0, singleChan, CMP_GT);
//Channel 2(Saturation)
        inRange(hueChan[1], range.getSatMin(), range.getSatMin()+range.getSatRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
//Channel 3(Value)
        inRange(hueChan[2], range.getValMin(), range.getValMin()+range.getValRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
#ifdef LOG
        strcpy(dest, "pics/");
        strcat(dest, picCurrent);
        strcat(dest, "A.png");
        imwrite(dest, singleChan, compression_params);
#endif
//Find and sort the contours
        findContours(singleChan, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        for(unsigned int i=0; i < contours.size(); i++)
        {
            tmpInt = contourArea(contours[i]);
            //if (tmpInt < 500) continue;
            tmpCont[0]=tmpInt;
            tmpCont[2]=i;
            orderedContours.push_back(tmpCont);
        }
		sort(orderedContours.begin(), orderedContours.end(), greaterArea);
#ifdef RITALIN
		tmpInt = checkContours(contours, orderedContours);
		if (orderedContours.size() < 1 || tmpInt < 0)
		{
			lastCenter=Point(-1,-1);
#ifdef DEBUG_POMS
			cout << ((tmpInt < 0) ? "Ridalin failed" : "Can't see nuttin") << endl;
#endif
#else
		if (orderedContours.size() < 1)
		{
#endif
#ifdef DEBUG_POMS
            cout << "We lost da dad gum pom" << endl;
#endif
			if (ticksLost < MAXCORRECT && (lastVel[LMOTOR] != 0  || lastVel[RMOTOR] != 0))
			{
				mav(LMOTOR, -2*lastVel[LMOTOR]);
				mav(RMOTOR, -2*lastVel[RMOTOR]);
			}
			else if (ticksLost < MAXCORRECT+MAXLOST)
			{
				mav(LMOTOR, -200);
				mav(RMOTOR,  200);
			}
			else
			{
				mav(LMOTOR,  200);
				mav(RMOTOR, -200);
			}
			if(ticksLost++ > MAXCORRECT+MAXLOST*3) { ticksLost=0; return false; }
            continue;
        }
//Find radius and center
#ifdef RITALIN
		minEnclosingCircle((Mat)contours[orderedContours[tmpInt][2]], center, radius);
#else
        minEnclosingCircle((Mat)contours[orderedContours[0][2]], center, radius);
#endif// RITALIN
#ifdef DEBUG_POMS
        cout << "Center x:" << center.x << " y:" << center.y << " with radius " << radius << " and area " << orderedContours[0][0] << endl;
#endif// DEBUG_POMS
		lastVel[LMOTOR] = 10*(YBARRIER-center.y) - 4*(CENTERX-center.x);
		lastVel[RMOTOR] = 10*(YBARRIER-center.y) + 4*(CENTERX-center.x);
		tmpInt = (YBARRIER-center.y) != 0 ? SIGN((YBARRIER-center.y)) : -1;
		while (ABS(lastVel[LMOTOR]) < MINVEL || ABS(lastVel[RMOTOR]) < MINVEL)
		{
			lastVel[LMOTOR] += tmpInt;
			lastVel[RMOTOR] += tmpInt;
		}
#ifdef DEBUG_POMS
		cout << "Turning l:" << lastVel[LMOTOR] << " r:" << lastVel[RMOTOR] << endl;
#endif// DEBUG_POMS
		mav(LMOTOR, lastVel[LMOTOR]);
		mav(RMOTOR, lastVel[RMOTOR]);
    }
#ifdef DEBUG_POMS
    cout << "We has da gone ta it" << endl;
#endif
    off(LMOTOR);
    off(RMOTOR);
    return true;
}

//! Tries to move orange away from green(Tracks green)
bool moveOrangeBack(colorRange rangeA, void* ourBot)
{
    bool retval = goToPom(rangeA, 0);
    if(!retval) return false;
    moveClaw(CLAW_OPEN);
//Grab it, move it back and turn to look at it
    mav(LMOTOR, 1500);
    mav(RMOTOR, 1500);
    msleep(250);
    moveClaw(CLAW_CLOSED);
    mav(LMOTOR, -1500);
    mav(RMOTOR, -1500);
    msleep(750);
    mav(LMOTOR,  500);
    mav(RMOTOR, -500);
    moveClaw(CLAW_OPEN);
    mav(LMOTOR, -500);
    mav(RMOTOR,  500);
    msleep(750);
    mav(LMOTOR, -1000);
    mav(RMOTOR, -1000);
    msleep(1250);
    mav(LMOTOR,  900);
    mav(RMOTOR, -900);
    msleep(100);
    off(LMOTOR);
    off(RMOTOR);
#ifdef RITALIN
	//invalidate the sensor after we screw with it
	lastCenter=Point(-1, -1);
#endif
    return true;
}
Example #25
0
static void focusDriveEnd(VideoCapture & cap, int direction)
{
    while (cap.set(CAP_PROP_ZOOM, (double) MAX_FOCUS_STEP * direction))
        ;
}
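This helper keeps stepping the focus motor until the driver rejects the request, i.e. until it reaches a mechanical end stop; CAP_PROP_ZOOM is repurposed here to carry focus steps, and MAX_FOCUS_STEP is defined elsewhere in the sample. A sketch of the assumed usage, establishing a known reference position before a focus sweep:

focusDriveEnd(cap, -1); // drive to one end of the focus range
focusDriveEnd(cap, +1); // or to the other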
Example #26
0
/**
 \param [in] rangeA the range for the color we want to find
 \param [in] rangeB the range of the color we want to avoid
 */
bool retrieveGreen(colorRange rangeA, colorRange rangeB, void* ourBot)
{
#ifdef ONCOMP
    Mat drawing;
    int width=160, height=120;
#else
//Create our camera
    if (!cap.isOpened()) return false;
#endif// ONCOMP
//Define misc variables
    vector<vector<int> > orderedContoursA, orderedContoursB;
    Mat source, chans, singleChan, tmpMatA, tmpMatB;
    vector<vector<Point> > contoursA, contoursB;
    vector<Mat> hueChan(3);
    vector<int> tmpCont(3);
    int tmpInt;
    Point2f centerA, centerB;
    float radiusA, radiusB;
    for(int i=0; i<8; i++)
    {
        moveOrangeBack(rangeA,0); //return-value check disabled; keep trying even if the move failed
//clear out contours
        orderedContoursA.clear();
        orderedContoursB.clear();
//Find contours for what we want to grab to start
        cap >> source;
#ifdef LOG
        strcpy(dest, "pics/");
        cout << "Pic" << ++pic << endl;
        itoa(pic,picCurrent,10);
        strcat(dest, picCurrent);
        strcat(dest, ".png");
        imwrite(dest, source, compression_params);
#endif
//Setup the image for parsing
        blur(source, source, Size(5, 5));
        cvtColor(source, chans, CV_BGR2HSV);
        split(chans, hueChan);
//Filter the image
        if(rangeA.getHueMin()+rangeA.getHueRange()<180) inRange(hueChan[0], rangeA.getHueMin(), rangeA.getHueMin()+rangeA.getHueRange(), singleChan);
        else
        {
            compare(hueChan[0], (rangeA.getHueMin()+rangeA.getHueRange())%180, tmpMatB, CMP_LE);
            compare(hueChan[0], rangeA.getHueMin(), tmpMatA, CMP_GE);
            bitwise_or(tmpMatA, tmpMatB, singleChan, Mat());
        }
        compare(singleChan, 0, singleChan, CMP_GT);
//Channel 2(Saturation)
        inRange(hueChan[1], rangeA.getSatMin(), rangeA.getSatMin()+rangeA.getSatRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
//Channel 3(Value)
        inRange(hueChan[2], rangeA.getValMin(), rangeA.getValMin()+rangeA.getValRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
#ifdef LOG
        strcpy(dest, "pics/");
        strcat(dest, picCurrent);
        strcat(dest, "A.png");
        imwrite(dest, singleChan, compression_params);
#endif// LOG
//Find the contours and sort them by area
        findContours(singleChan, contoursA, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        for(unsigned int j=0; j < contoursA.size(); j++)
        {
            tmpInt = contourArea(contoursA[j]);
            //if (tmpInt < 500) continue;
            tmpCont[0]=tmpInt;
            tmpCont[2]=j;
            orderedContoursA.push_back(tmpCont);
        }
        sort(orderedContoursA.begin(), orderedContoursA.end(), greaterArea);
#ifdef RITALIN
		tmpInt=checkContours(contoursA, orderedContoursA);
        if(orderedContoursA.size()<1 || tmpInt < 0)
        {
        	lastCenter=Point(-1,-1);
#else
        if (orderedContoursA.size() < 1) //We couldn't find anything to look for
        {
#endif
#ifdef DEBUG_RETGREEN
            cout << "We were unable find what we wanted" << endl;
#endif
            return false;
        }
#ifdef RITALIN
	minEnclosingCircle((Mat)contoursA[orderedContoursA[tmpInt][2]], centerA, radiusA);
#else
	minEnclosingCircle((Mat)contoursA[orderedContoursA[0][2]], centerA, radiusA);
#endif
//Find the contours for what we want to avoid next
        if(rangeB.getHueMin()+rangeB.getHueRange()<180) inRange(hueChan[0], rangeB.getHueMin(), rangeB.getHueMin()+rangeB.getHueRange(), singleChan);
        else
        {
            compare(hueChan[0], (rangeB.getHueMin()+rangeB.getHueRange())%180, tmpMatB, CMP_LE);
            compare(hueChan[0], rangeB.getHueMin(), tmpMatA, CMP_GE);
            bitwise_or(tmpMatA, tmpMatB, singleChan, Mat());
        }
        compare(singleChan, 0, singleChan, CMP_GT);
//Channel 2(Saturation)
        inRange(hueChan[1], rangeB.getSatMin(), rangeB.getSatMin()+rangeB.getSatRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
//Channel 3(Value)
        inRange(hueChan[2], rangeB.getValMin(), rangeB.getValMin()+rangeB.getValRange(), tmpMatA);
        bitwise_and(singleChan, tmpMatA, singleChan, Mat());
#ifdef LOG
        strcpy(dest, "pics/");
        strcat(dest, picCurrent);
        strcat(dest, "B.png");
        imwrite(dest, singleChan, compression_params);
#endif// LOG
//Find the contours from the filtered image and sort by size
        findContours(singleChan, contoursB, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
        for(unsigned int j=0; j < contoursB.size(); j++)
        {
            tmpInt = contourArea(contoursB[j]);
            if (tmpInt < 10) continue;
            tmpCont[0]=tmpInt;
            tmpCont[2]=j;
            orderedContoursB.push_back(tmpCont);
        }

#ifdef DEBUG_RETGREEN
        cout << "We found it to start" << endl;
#endif

		cout << "Good Center x:" << centerA.x << " y:" << centerA.y << " With radius " << radiusA << " and Area " << orderedContoursA[0][0] << endl;
        for(unsigned int j=0; j < orderedContoursB.size(); j++)
        {
            minEnclosingCircle((Mat)contoursB[orderedContoursB[j][2]], centerB, radiusB);
            if(((centerA.x-centerB.x)*(centerA.x-centerB.x) + (centerA.y-centerB.y)*(centerA.y-centerB.y) >= (errorSep+radiusB)*(errorSep+radiusB)))// || ((centerA.y-radiusA <= centerB.y) && (ABS(centerA.x-centerB.x) <= 15+2*radiusB)))
            {
            	moveClaw(CLAW_CLOSED);
            	goToPom(rangeA, 0);
            	mav(LMOTOR, -900);
            	mav(RMOTOR, -900);
            	msleep(200);
            	off(LMOTOR);
				off(RMOTOR);
				moveClaw(CLAW_OPEN);
				mav(LMOTOR, 900);
				mav(RMOTOR, 900);
				msleep(750);
				off(LMOTOR);
				off(RMOTOR);
				moveClaw(CLAW_CLOSED);
				cout << "We got it" << endl;
				return true;
            }
            else
            {
				cout << "Bad Center x:" << centerB.x << " y:" << centerB.y << " With radius " << radiusB << " and Area " << orderedContoursB[j][0] << endl;
				cout << "It is too close to the orange" << endl;
				break;
            }
        }
    }
    disable_servos();
    cout << "We couldn't find nuttin" << endl;
    return false;
}

#ifdef TESTCASES_POMS
int main(int argc, char* argv[])
{
    goToPom(orangeRange(), 0);
    off(LMOTOR);
    off(RMOTOR);
    return 0;
}
#endif// TESTCASES_POMS
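Both pom routines above repeat the same hue-wrapping trick: OpenCV's 8-bit hue channel lives in [0,180), so a range that crosses 180 cannot be expressed with a single inRange and is instead built from two compares OR-ed together. A minimal sketch of that logic factored into a helper (the function name is an assumption):

// Hypothetical helper: hue mask for ranges that may wrap past 180.
void inRangeHueWrapped(const Mat& hue, int hueMin, int hueRange, Mat& mask) {
    if (hueMin + hueRange < 180) {
        inRange(hue, Scalar(hueMin), Scalar(hueMin + hueRange), mask); // no wrap
    } else {
        Mat low, high;
        compare(hue, Scalar((hueMin + hueRange) % 180), low, CMP_LE); // wrapped tail
        compare(hue, Scalar(hueMin), high, CMP_GE);                   // unwrapped head
        bitwise_or(high, low, mask);                                  // union of both halves
    }
}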
Example #27
0
int startTCPClient(char *servername, int port)
{
    pthread_t   sendthread;
    pthread_t recthread;
    int         key = 0; // initialize so the first while-test is defined
    Size size(640,480);
    
    server_ip = servername;
    server_port = port;
    
    
    int clientSock = establishTCPConnection();

    capture.open(0);
    
    if (!capture.isOpened()) {
        quit("\n--> cvCapture failed", 1);
    }
    
    
    
    // run the sending thread
    if (pthread_create(&sendthread, NULL, sendData, &clientSock)) {
        quit("\n--> pthread_create failed.", 1);
    }

    
    // run the receiving thread
    if (pthread_create(&recthread, NULL, receiveDataThread, &clientSock)) {
        quit("\n--> pthread_create failed.", 1);
    }
    
    capture >> img0;
    img1 = Mat::zeros(img0.rows, img0.cols ,CV_8UC1);

    cout << "\n--> Press 'q' to quit. \n\n" << endl;
    
    /* print the width and height of the frame, needed by the client */
    
    
    namedWindow("stream_client", CV_WINDOW_AUTOSIZE);
    //flip(img0, img0, 1);
    
    //cvtColor(img0, img1, CV_BGR2GRAY);
    
    while(key != 'q') {
        /* get a frame from camera */
        capture >> img0;
        if (img0.empty()) break;
        
        pthread_mutex_lock(&gmutex);
        resize(img0,img0,size);
        flip(img0, img0, 1);
        
        cvtColor(img0, img1, CV_BGR2GRAY);
        //img1 = img0.clone();
        is_data_ready = 1;
        
        pthread_mutex_unlock(&gmutex);
        
        /*also display the video here on client */
        
        imshow("stream_client", img0);
        key = waitKey(30);
    }
    
    /* user has pressed 'q', terminate the streaming client */
    if (pthread_cancel(sendthread) || pthread_cancel(recthread) ) {
        quit("\n--> pthread_cancel failed.", 1);
    }
    
    /* free memory */
    destroyWindow("stream_client");
    quit("\n--> NULL", 0);
    return 0;
}
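The sender and receiver threads created above share the frame through the gmutex/is_data_ready handshake: the capture loop fills img1 under the lock and raises the flag, and the consumer clears it once the frame has been shipped. A minimal sketch of the consumer side of that handshake (the actual socket write is elided; names mirror the globals used above):

// Sketch of the sending thread's view of the handshake.
void* sendDataSketch(void* socketPtr) {
    for (;;) {
        pthread_mutex_lock(&gmutex);
        if (is_data_ready) {
            // ... serialize img1 and write it to *(int*)socketPtr ...
            is_data_ready = 0; // mark the frame as consumed
        }
        pthread_mutex_unlock(&gmutex);
        usleep(1000); // yield briefly instead of busy-spinning
    }
    return 0;
}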
Example #28
0
int main(int argc, char** argv)
{
	VideoCapture capture;
	if(argc < 3) {
		fprintf(stderr, "Usage: %s <video> <output_file>\n", argv[0]);
		return -1;
	}
	char* video = argv[1];
	const char* outputFile = argv[2];
	FILE *pFile = fopen(outputFile,"w");
	//int flag = arg_parse(argc, argv);
	bool flag = false;
	capture.open(video);

	if(!capture.isOpened()) {
		fprintf(stderr, "Could not initialize capturing..\n");
		return -1;
	}

	int frame_num = 0;
	TrackInfo trackInfo;
	DescInfo hogInfo, hofInfo, mbhInfo;

	InitTrackInfo(&trackInfo, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, false, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, true, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, false, patch_size, nxy_cell, nt_cell);

	SeqInfo seqInfo;
	InitSeqInfo(&seqInfo, video);

	if(flag)
		seqInfo.length = end_frame - start_frame + 1;

//	fprintf(stderr, "video size, length: %d, width: %d, height: %d\n", seqInfo.length, seqInfo.width, seqInfo.height);

	if(show_track == 1)
		namedWindow("DenseTrack", 0);

	Mat image, prev_grey, grey;

	std::vector<float> fscales(0);
	std::vector<Size> sizes(0);

	std::vector<Mat> prev_grey_pyr(0), grey_pyr(0), flow_pyr(0);
	std::vector<Mat> prev_poly_pyr(0), poly_pyr(0); // for optical flow

	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while(true) {
		Mat frame;
		int i, j, c;

		// get a new frame
		capture >> frame;
		if(frame.empty())
			break;

		if(frame_num < start_frame || frame_num > end_frame) {
			frame_num++;
			continue;
		}

		if(frame_num == start_frame) {
			image.create(frame.size(), CV_8UC3);
			grey.create(frame.size(), CV_8UC1);
			prev_grey.create(frame.size(), CV_8UC1);

			InitPry(frame, fscales, sizes);

			BuildPry(sizes, CV_8UC1, prev_grey_pyr);
			BuildPry(sizes, CV_8UC1, grey_pyr);

			BuildPry(sizes, CV_32FC2, flow_pyr);
			BuildPry(sizes, CV_32FC(5), prev_poly_pyr);
			BuildPry(sizes, CV_32FC(5), poly_pyr);

			xyScaleTracks.resize(scale_num);

			frame.copyTo(image);
			cvtColor(image, prev_grey, CV_BGR2GRAY);

			for(int iScale = 0; iScale < scale_num; iScale++) {
				if(iScale == 0)
					prev_grey.copyTo(prev_grey_pyr[0]);
				else
					resize(prev_grey_pyr[iScale-1], prev_grey_pyr[iScale], prev_grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

				// dense sampling feature points
				std::vector<Point2f> points(0);
				DenseSample(prev_grey_pyr[iScale], points, quality, min_distance);

				// save the feature points
				std::list<Track>& tracks = xyScaleTracks[iScale];
				for(i = 0; i < points.size(); i++)
					tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
			}

			// compute polynomial expansion
			my::FarnebackPolyExpPyr(prev_grey, prev_poly_pyr, fscales, 7, 1.5);

			frame_num++;
			continue;
		}

		init_counter++;
		frame.copyTo(image);
		cvtColor(image, grey, CV_BGR2GRAY);

		// compute optical flow for all scales once
		my::FarnebackPolyExpPyr(grey, poly_pyr, fscales, 7, 1.5);
		my::calcOpticalFlowFarneback(prev_poly_pyr, poly_pyr, flow_pyr, 10, 2);

		for(int iScale = 0; iScale < scale_num; iScale++) {
			if(iScale == 0)
				grey.copyTo(grey_pyr[0]);
			else
				resize(grey_pyr[iScale-1], grey_pyr[iScale], grey_pyr[iScale].size(), 0, 0, INTER_LINEAR);

			int width = grey_pyr[iScale].cols;
			int height = grey_pyr[iScale].rows;

			// track feature points in each scale separately
			std::list<Track>& tracks = xyScaleTracks[iScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end();) {
				int index = iTrack->index;
				Point2f prev_point = iTrack->point[index];
				int x = std::min<int>(std::max<int>(cvRound(prev_point.x), 0), width-1);
				int y = std::min<int>(std::max<int>(cvRound(prev_point.y), 0), height-1);

				Point2f point;
				point.x = prev_point.x + flow_pyr[iScale].ptr<float>(y)[2*x];
				point.y = prev_point.y + flow_pyr[iScale].ptr<float>(y)[2*x+1];
 
				if(point.x <= 0 || point.x >= width || point.y <= 0 || point.y >= height) {
					iTrack = tracks.erase(iTrack);
					continue;
				}

				iTrack->addPoint(point);

				// draw the trajectories at the first scale
				if(show_track == 1 && iScale == 0)
					DrawTrack(iTrack->point, iTrack->index, fscales[iScale], image);

				// if the trajectory achieves the maximal length
				if(iTrack->index >= trackInfo.length) {
					std::vector<Point2f> trajectory(trackInfo.length+1);
					std::vector<Point2f> trajectory2(trackInfo.length+1);
					for(int i = 0; i <= trackInfo.length; ++i)
						trajectory[i] = iTrack->point[i]*fscales[iScale];
						
					trajectory2 = trajectory;
				
					float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
					if(IsValid(trajectory, mean_x, mean_y, var_x, var_y, length)) {
//						printf("%d\t%f\t%f\t%f\t%f\t%f\t%f\t", frame_num, mean_x, mean_y, var_x, var_y, length, fscales[iScale]);
						fprintf(pFile, "%d\t",frame_num);

//						// for spatio-temporal pyramid
//						printf("%f\t", std::min<float>(std::max<float>(mean_x/float(seqInfo.width), 0), 0.999));
//						printf("%f\t", std::min<float>(std::max<float>(mean_y/float(seqInfo.height), 0), 0.999));
//						printf("%f\t", std::min<float>(std::max<float>((frame_num - trackInfo.length/2.0 - start_frame)/float(seqInfo.length), 0), 0.999));
					
//						// output the trajectory
//						for (int i = 0; i < trackInfo.length; ++i)
//							printf("%f\t%f\t", trajectory[i].x,trajectory[i].y);
						for (int i = 0; i < trackInfo.length; ++i)
							fprintf(pFile, "%f\t%f\t", trajectory2[i].x,trajectory2[i].y);

						fprintf(pFile, "\n");
					}

					iTrack = tracks.erase(iTrack);
					continue;
				}
				++iTrack;
			}

			if(init_counter != trackInfo.gap)
				continue;

			// detect new feature points every initGap frames
			std::vector<Point2f> points(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++)
				points.push_back(iTrack->point[iTrack->index]);

			DenseSample(grey_pyr[iScale], points, quality, min_distance);
			// save the new feature points
			for(i = 0; i < points.size(); i++)
				tracks.push_back(Track(points[i], trackInfo, hogInfo, hofInfo, mbhInfo));
		}

		init_counter = 0;
		grey.copyTo(prev_grey);
		for(i = 0; i < scale_num; i++) {
			grey_pyr[i].copyTo(prev_grey_pyr[i]);
			poly_pyr[i].copyTo(prev_poly_pyr[i]);
		}

		frame_num++;

		if( show_track == 1 ) {
			imshow( "DenseTrack", image);
			c = cvWaitKey(3);
			if((char)c == 27) break;
		}
	}

	if( show_track == 1 )
		destroyWindow("DenseTrack");

	fclose(pFile);

	return 0;
}
Example #29
0
int main(int argc, char** argv)
{
        pthread_t   thread_c;
        int         key = 0; // initialize so the first while-test is defined

        if (argc < 3) {
                quit("Usage: netcv_client <server_ip> <server_port> <input_file>(optional)", 0);
        }
        if (argc == 4) {
                capture.open(argv[3]);
        } else {
                capture.open(0);
        }

        if (!capture.isOpened()) {
                quit("\n--> cvCapture failed", 1);
        }

        server_ip   = argv[1];
        server_port = atoi(argv[2]);



	capture.set(CV_CAP_PROP_FRAME_WIDTH,320);
	capture.set(CV_CAP_PROP_FRAME_HEIGHT,240);



        capture >> img0;
        img1 = Mat::zeros(img0.rows, img0.cols ,CV_8UC1);

        // run the streaming client as a separate thread 
        if (pthread_create(&thread_c, NULL, streamClient, NULL)) {
                quit("\n--> pthread_create failed.", 1);
        }

        cout << "\n--> Press 'q' to quit. \n\n" << endl;

        /* print the width and height of the frame, needed by the client */
        cout << "\n--> Transferring  (" << img0.cols << "x" << img0.rows << ")  images to the:  " << server_ip << ":" << server_port << endl;

        //namedWindow("stream_client", CV_WINDOW_AUTOSIZE);
                        //flip(img0, img0, 1);
                        //cvtColor(img0, img1, CV_BGR2GRAY);

        while(key != 'q') {
                /* get a frame from camera */
                //capture >> img0;
                //if (img0.empty()) break;

                pthread_mutex_lock(&mutex);

		
                capture >> img0;
                if (img0.empty()) break;

                        //flip(img0, img0, 1);
                        //cvtColor(img0, img1, CV_BGR2GRAY);

                is_data_ready = 1;

                pthread_mutex_unlock(&mutex);

                /*also display the video here on client */
	
                imshow("stream_client", img2);
                key = waitKey(30);
        }

        /* user has pressed 'q', terminate the streaming client */
        if (pthread_cancel(thread_c)) {
                quit("\n--> pthread_cancel failed.", 1);
        }

        /* free memory */
        //destroyWindow("stream_client");
        quit("\n--> NULL", 0);
return 0;
}
Example #30
0
void capture_image(VideoCapture capWebcam)
{
    time_duration td, td1;
    ptime nextFrameTimestamp, currentFrameTimestamp, initialLoopTimestamp, finalLoopTimestamp;
    int delayFound;

    // set framerate to record and capture at
    int framerate = 120;

    // Get the properties from the camera
    double width = capWebcam.get(CV_CAP_PROP_FRAME_WIDTH);
    double height = capWebcam.get(CV_CAP_PROP_FRAME_HEIGHT);

    // Create a matrix to keep the retrieved frame
    Mat frame;

    // Create the video writer, sized to the camera's actual frame dimensions
    VideoWriter video("capture.avi",CV_FOURCC('D','I','V','X'), framerate, cvSize((int)width,(int)height) );

    // initialize initial timestamps
    nextFrameTimestamp = microsec_clock::local_time();
    currentFrameTimestamp = nextFrameTimestamp;
    td = (currentFrameTimestamp - nextFrameTimestamp);

    // start thread to begin capture and populate Mat frame
    boost::thread captureThread(captureFunc, &frame, &capWebcam);

    int i=0;
    for(;;)
    {

        // wait for X microseconds until 1second/framerate time has passed after previous frame write
        while(td.total_microseconds() < 1000000/framerate)
        {
        //determine current elapsed time
            currentFrameTimestamp = microsec_clock::local_time();
            td = (currentFrameTimestamp - nextFrameTimestamp);
        }

        //determine time at start of write
        initialLoopTimestamp = microsec_clock::local_time();

        if((frame.rows && frame.cols))
        {
            std::ostringstream strs;
            strs << i;
            std::string str = strs.str();
            putText(frame, strs.str() , cvPoint(100,100),FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(200,255,250), 1, CV_AA);
            waitKey(1); // brief delay so highgui can process window events
            i++;
        }

        // Save frame to video
        video << frame;
        //MyWindow obj;
        //obj.ui->label_5->setText("recording");

        // add 1second/framerate time for next loop pause
        nextFrameTimestamp = nextFrameTimestamp + microsec(1000000/framerate);

        // reset time_duration so while loop engages
        td = (currentFrameTimestamp - nextFrameTimestamp);

        //determine and print out delay in ms, should be less than 1000/FPS
        //occasionally, if delay is larger than said value, correction will occur
        //if delay is consistently larger than said value, then CPU is not powerful
        // enough to capture/decompress/record/compress that fast.
        finalLoopTimestamp = microsec_clock::local_time();
        td1 = (finalLoopTimestamp - initialLoopTimestamp);
        delayFound = td1.total_milliseconds();
        //cout << delayFound << endl;

        //output will be in following format
        //[TIMESTAMP OF PREVIOUS FRAME] [TIMESTAMP OF NEW FRAME] [TIME DELAY OF WRITING]

    }

    // Exit
}
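The pacing loop above budgets 1000000/framerate microseconds per frame, so at the configured 120 fps each write must complete within roughly 8333 µs; delayFound, measured in milliseconds, should therefore stay below about 8 ms on a machine that can keep up. A sketch of the same budget arithmetic:

// Per-frame time budget for a target capture rate.
const int framerate = 120;
const long budget_us = 1000000L / framerate; // = 8333 microseconds per frame
const double budget_ms = budget_us / 1000.0; // ~8.3 ms; delayFound should stay below this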