void streaming_process(cv::VideoCapture vcap, ros::Publisher* _blob_publisher)
{
  Camera Cam;

  int counting=0;                         // Number of ellipses detected
  int falsepositive=0;                    // Number of false-positive ellipses detected
  int zerovalue=0;                        // Number of (0,0)-centred detections to discard
  int eps = 5;                            // Threshold (pixels) to detect false positives
  bool notvalidvalue=false;

  // Active signal (CTRL + C interrupt)
  struct sigaction sigIntHandler;
  sigIntHandler.sa_handler = my_handler;
  sigemptyset(&sigIntHandler.sa_mask);
  sigIntHandler.sa_flags = 0;
  sigaction(SIGINT, &sigIntHandler, NULL);

  Mat src;                              // Frame
  vector<Vec3f> circles;                // Vector of coordinates indicating x,y,r parameters of circles detected
  vector<RotatedRect> minEllipse;       // Vector of data indicating x,y coord of ellipses detected
  vector<RotatedRect> dataEllipses;

  std_msgs::Float64MultiArray cog_blobs;
  Eigen::MatrixXd blobs_matrix_data = Eigen::MatrixXd::Zero(2,3);


  /// Ellipses are detected and returned as rotated rectangles (cv::RotatedRect)

  while(counting<2){

    if(!vcap.read(src)) {               // get a new frame from camera
        std::cout << "No frame" << std::endl;
        waitKey();
        continue;                       // don't process an empty frame
    }

    /// Ellipses detection
    Cam.ellipsedetection(src, minEllipse, &counting);

    std::cout << "features detected: " << minEllipse.size() << std::endl;

    notvalidvalue = false;
    falsepositive = 0;
    zerovalue = 0;
    for(int n = 0; n < counting; n++){
        notvalidvalue = false;          // reset per detection

        for(unsigned int n2 = 0; n2 < dataEllipses.size(); n2++){
            //std::cout << "compare (" << minEllipse[n].center.x << "," << minEllipse[n].center.y << ") with (" << dataEllipses[n2].center.x << "," << dataEllipses[n2].center.y << ")" << std::endl;
            if(fabs(minEllipse[n].center.x - dataEllipses[n2].center.x) < eps &&
               fabs(minEllipse[n].center.y - dataEllipses[n2].center.y) < eps) {
                std::cout << "already detected" << std::endl;
                notvalidvalue = true;
                falsepositive++;
                break;
            }
        }
        if (!notvalidvalue) {
            if (minEllipse[n].center.x != 0 && minEllipse[n].center.y != 0)
                dataEllipses.push_back(minEllipse[n]);
            else
                zerovalue++;
        }
    }

    std::cout << "false positive  " << falsepositive << std::endl;
    counting = counting - falsepositive - zerovalue;


    // Only reset the accumulated detections while we keep searching; once the
    // loop exits, dataEllipses[0] and dataEllipses[1] are used below.
    if (counting < 2)
      dataEllipses.clear();

    std::cout << "Real features detected: " << counting << std::endl;
    std::cout << " ------------ " << std::endl;



  }


  /// VISP PART: Tracking circles
  /// Once the circles are found, tell ViSP where they are

  Mat srcHSV, srcWB;
  vpImage<unsigned char> I; // for gray images
  vpImagePoint vImp;
  vpImagePoint vImp2;

  vpImageConvert::convert(src, I);  // convert image from OpenCV to ViSP

  vpDisplayOpenCV d(I);
  vpDisplay::display(I);
  vpDisplay::flush(I);
  std::list<vpDot2> blob_list;
  vpDot2 blob;
  vpDot2 blob2;

  // assign blobs position
  vImp.set_u(dataEllipses[0].center.x);
  vImp.set_v(dataEllipses[0].center.y);
  vImp2.set_u(dataEllipses[1].center.x);
  vImp2.set_v(dataEllipses[1].center.y);

  // blobs definition
  blob.setGraphics(true);
  blob.setGraphicsThickness(1);
  blob.initTracking(I,vImp);

  blob.track(I);

  blob2.setGraphics(true);
  blob2.setGraphicsThickness(1);
  blob2.initTracking(I,vImp2);

  blob2.track(I);

  //   std::cout << "cog " << blob.getCog() << std::endl;
  //   std::cout << "cog2 " << blob2.getCog() << std::endl;

  double PPx = 330.98367;        // Principal point X
  double PPy = 273.41515;        // Principal point Y
  unsigned int iter = 1;

  while(!stop_interrupt) {

    vcap >> src; // get a new frame from camera
    vpImageConvert::convert(src, I);

    vpDisplay::display(I);
    blob.track(I);
    blob2.track(I);
    vpDisplay::displayCircle(I, PPy, PPx, 10, vpColor::red);
    vpDisplay::displayCross(I, PPy, PPx, 20, vpColor::green);
    vpDisplay::flush(I);


    /// Publishing
    // blobs1 x (u)
    blobs_matrix_data(0,0) = blob.getCog().get_u();
    // blobs1 y (v)
    blobs_matrix_data(0,1) = blob.getCog().get_v();
    // blobs1 r
    blobs_matrix_data(0,2) = blob.getWidth()/2; // radius

    // blobs2 x (u)
    blobs_matrix_data(1,0) = blob2.getCog().get_u();
    // blobs2 y (v)
    blobs_matrix_data(1,1) = blob2.getCog().get_v();
    // blobs2 r
    blobs_matrix_data(1,2) = blob2.getWidth()/2; // radius

    // conversion to std_msgs
    tf::matrixEigenToMsg(blobs_matrix_data,cog_blobs);

    // Define the message to send
    _blob_publisher->publish(cog_blobs);
    std::cout << "ellipse 1: " << blobs_matrix_data(0,0) << " - " << blobs_matrix_data(0,1) << std::endl;
    std::cout << "ellipse 2: " << blobs_matrix_data(1,0) << " - " << blobs_matrix_data(1,1) << std::endl;

    ros::Duration(0.01).sleep();
    iter++;
  }


}
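
// A minimal sketch (an assumption, not the original Camera class) of what an
// ellipse-detection routine like Cam.ellipsedetection above could look like:
// binarize the frame, extract contours, and fit a rotated rectangle to each
// sufficiently large contour with cv::fitEllipse.
void ellipsedetection_sketch(const Mat &src, vector<RotatedRect> &minEllipse, int *counting)
{
  Mat gray, bin;
  cvtColor(src, gray, COLOR_BGR2GRAY);
  threshold(gray, bin, 100, 255, THRESH_BINARY);      // fixed threshold: a guess

  vector<vector<Point> > contours;
  findContours(bin, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);

  for (size_t i = 0; i < contours.size(); i++) {
    if (contours[i].size() < 5)                       // fitEllipse needs >= 5 points
      continue;
    minEllipse.push_back(fitEllipse(contours[i]));
    (*counting)++;
  }
}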
Example No. 2
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout <<"cols "<<image.cols << std::endl;
        std::cout <<"rows "<<image.rows << std::endl;
        std::cout <<"channels "<<image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if(frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
        if(isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
            video.read(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play",[this] {
            currentState = PLAY;
            makeGUI();
        });
        if(currentState == PLAY) {
            interface->addButton("pause",[this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
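
// A minimal sketch (the function name and its wiring are assumptions, not part
// of the original app) of how the featurePoints collected by
// cv::goodFeaturesToTrack above could be overlaid on the current frame:
void OpenCVTemplateApp::drawFeaturePoints() {
    cv::Mat display;
    cv::cvtColor(frame, display, cv::COLOR_GRAY2BGR);         // frame is gray here (the button above requires it)
    for(const auto &p : featurePoints) {
        cv::circle(display, p, 3, cv::Scalar(0, 255, 0), -1); // filled green dot
    }
    frameTexture = gl::Texture::create(fromOcv(display));
}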
Example No. 3
///////////////////////////////////////////////////////////////////////////////
// Generate polar coordinates from a processed video
///////////////////////////////////////////////////////////////////////////////
void sparse_optical_flow(){
	int count = 0;

	// loop through video
	while( vidCap.read( currentImage ) && count < end_frame ){
		count++;
		if( count < start_frame ) continue;
		if( count % every_n_frames != 0 ) continue;
		cout << "frame: " << count << "/" << end_frame << endl;

		cv::cvtColor( currentImage, currGray, CV_BGR2GRAY );
		currentImage.copyTo( drawTo );
		if( needToInit ) {
			goodFeaturesToTrack( currGray, points[1], 10000, 0.01, 3, cv::Mat(), 3, 0, 0.04 );
			cornerSubPix( currGray, points[1], cv::Size(10,10), cv::Size(-1, -1), termcrit );
			needToInit = false;
		}else if( !points[0].empty() ){
			vector<uchar> status;
			vector<float> err;
			if(prevGray.empty()){
					currGray.copyTo(prevGray);
			}
			calcOpticalFlowPyrLK( prevGray, currGray, points[0], points[1], status, err, cv::Size(5,5), 3, termcrit, 0, 0.001);
			size_t i, k;
			for( i = k = 0; i < points[1].size(); i++){
				if(!status[i]){
					continue;
				}

				points[1][k++] = points[1][i];

				float dist = distanceBetweenPoints( points[0][i], points[1][i] );

				if( dist > min_vector_length && dist < 10.00f ){

					// clamp both endpoints into the image bounds [0, width) x [0, height)
					for( cv::Point2f *p : { &points[0][i], &points[1][i] } ){
						if( p->x < 0.0f ) p->x = 0.0f;
						if( p->y < 0.0f ) p->y = 0.0f;
						if( p->x >= width )  p->x = width  - 0.0001f;
						if( p->y >= height ) p->y = height - 0.0001f;
					}

					// don't use vectors that start and end in the same position
					if( (points[0][i].x == points[1][i].x) && (points[0][i].y == points[1][i].y) ){
						continue;
					}

					// draw vector
					cv::line(drawTo, points[0][i], points[1][i], cv::Scalar(0,0,255),1,8,0);
					// draw the head
					cv::circle(drawTo, points[1][i], 1, cv::Scalar(0,255,0), -1,8,0);
				}
			}
			points[1].resize(k);
		}
		cv::imshow("vectors", drawTo);
		cv::waitKey(1);

		swap(points[1], points[0]);
		swap(prevGray, currGray);
		swap(prevImage, currentImage);
	}
}
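
///////////////////////////////////////////////////////////////////////////////
// A minimal sketch (an assumption, not the original code) of the polar
// conversion promised in the header comment above: turn each point pair from
// points[0]/points[1] into a (magnitude, angle) pair. Assumes <cmath>.
///////////////////////////////////////////////////////////////////////////////
void vectors_to_polar( const vector<cv::Point2f> &from, const vector<cv::Point2f> &to,
                       vector<float> &magnitude, vector<float> &angle ){
	magnitude.clear();
	angle.clear();
	for( size_t i = 0; i < from.size() && i < to.size(); i++ ){
		float dx = to[i].x - from[i].x;
		float dy = to[i].y - from[i].y;
		magnitude.push_back( std::sqrt( dx*dx + dy*dy ) );  // vector length in pixels
		angle.push_back( std::atan2( dy, dx ) );            // direction in radians
	}
}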
Example No. 4
int main(int argc, char* argv[])
{
    signal(SIGINT, quitFunction);

    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }

    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened())
    {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;

    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;

    // The detection is not perfect, so if a tag is not detected during one
    // frame, the tag briefly disappears, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags at the same
    // position for a few frames. Only when a tag has been gone for more than
    // 5 frames does Chilitags actually remove it.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);
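    // (For comparison, a call like chilitags.setFilter(5, 0.5f) would keep
    // tags alive across short detection gaps; those values are illustrative
    // assumptions, not documented defaults.)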

    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q' is pressed
    for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {

        // Capture a new image.
        capture.read(inputImage);

        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();

        // Detect tags on the current image (and time the detection).
        // The resulting map associates tag ids (between 0 and 1023)
        // to four 2D points corresponding to the corners positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);

        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();


        // Now we start using the result of the detection.

        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCV can draw with sub-pixel precision using fixed-point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1<<SHIFT;

        // We don't want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();

        for (const std::pair<int, chilitags::Quad> & tag : tags) {

            int id = tag.first;
            // We wrap the corner matrix into a datastructure that allows an
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);

            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION*corners(i),
                    PRECISION*corners((i+1)%4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }

            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented. It means that the points 0,1,2,3 of
            // the Quad coordinates are consistently the top-left, top-right,
            // bottom-right and bottom-left corners.
            // (i.e. clockwise, starting from top-left)
            // Using this, we can compute (an approximation of) the center of
            // the tag.
            cv::Point2f center = 0.5f*(corners(0) + corners(2));
            cv::putText(outputImage, cv::format("%d", id), center,
                        cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        }

        // Some stats on the current frame (resolution and processing time)
        cv::putText(outputImage,
                    cv::format("%dx%d %4.0f ms (press q to quit)",
                               outputImage.cols, outputImage.rows,
                               processingTime),
                    cv::Point(32,32),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);

        // Finally...
        cv::imshow("DisplayChilitags", outputImage);
    }

    cv::destroyWindow("DisplayChilitags");
    capture.release();

    return 0;
}
Example No. 5
void opencvLoop(){
    //read first frame
    stream1.read(frame1);
    if(frame1.empty()){
        //nothing captured, skip this iteration
        return;
    }
    //convert frame1 to gray scale for frame differencing
    cvtColor(frame1, grayImage1, COLOR_BGR2GRAY);
    //read second frame
    stream1.read(frame2);
    if(frame2.empty()){
        return;
    }
    //convert frame2 to gray scale for frame differencing
    cvtColor(frame2, grayImage2, COLOR_BGR2GRAY);
    //perform frame differencing with the sequential images. This will output an "intensity image"
    //do not confuse this with a threshold image, we will need to perform thresholding afterwards.
    absdiff(grayImage1, grayImage2, differenceImage);
    //threshold intensity image at a given sensitivity value
    threshold(differenceImage, thresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
    if (debugMode) {
        //show the difference image and threshold image
        video[1] = differenceImage;            // imshow("Difference Image", differenceImage);
//        glutSetWindow(debugWindow1);
//        glutShowWindow();
        video[2] = thresholdImage;             //imshow("Threshold Image", thresholdImage);
    }
    else {
        //if not in debug mode, destroy the windows
        //glutHideWindow();
       // glutHideWindow(debugWindow2);
    }
    //blur() to smooth the image, remove noise
    blur(thresholdImage, thresholdImage, cv::Size(BLUR_SIZE, BLUR_SIZE));
    //threshold again to obtain binary image from blur output
    threshold(thresholdImage, thresholdImage, SENSITIVITY_VALUE, 255, THRESH_BINARY);
    //verifies that image is 8 bit for findcontours()
    thresholdImage.convertTo(thresholdImage, CV_8U);
    if (debugMode) {
        //show the threshold image after it's been "blurred"
        video[3] = thresholdImage;             //imshow("Final Threshold Image", thresholdImage);
    }
    else {
       // glutHideWindow(debugWindow3);
    }
    
    //if tracking enabled, search for contours in our thresholded image
    if (trackingEnabled) {
        //Collects a number of sample averages specified by SMOOTHING_SAMPLE and sticks them in the samplePoints global vector
        collectSamples(thresholdImage, frame1);
    }
    //when samplePoints vector reaches the size specified by SMOOTHING_SAMPLE, updates the current point and clears samplePoints
    if (samplePoints.size() == SMOOTHING_SAMPLE) {
        destination = pathSmoothing(frame1);
        samplePoints.clear();
    }
    
    if(trackingEnabled){
        targetPoints.push_back(current);
        vector<Point> temp;
        int counter = 0;
        
        //Draw Past motion of target//
        //if vector target only has one point, skip over it
        if(targetPoints.size() > 1){
            if(TRAILS >= targetPoints.size()){
                counter = targetPoints.size();
                //target hasn't accumulated that many trail points yet
                //Loop through past points and draw every line of motion so far
                for(int i = 0; i+1<counter; i++){
                    line(frame1, targetPoints[i], targetPoints[i+1], Scalar(0, 0, 255), 2);
                    cout << "building up" << endl;
                }
            }
            else{
                counter = TRAILS;
                
                //Loop through past points and draw the last lines of motion
                for(int i = 0; i+1<counter; i++){
                    line(frame1, targetPoints[targetPoints.size()-1-i], targetPoints[targetPoints.size()-2-i], Scalar(0, 0, 255), 2);
                    targetPoints[i] = targetPoints[i+1];    //shift older points forward
                }
            }
        }
    }
    
    
    //limits the speed of movement of the target crosshair. We can tweak SPEED_OF_MOVEMENT to accurately reflect the actual position of the laser in project 2
    //so that we can draw an obscuring circle over the laser
    current = speedGovernor(current, destination, SPEED_OF_MOVEMENT);
    Point predictLine = speedGovernor(current, destination, 100);
    
    //draw the target
    drawTarget(current, frame1);
    line(frame1, current, predictLine, Scalar(0, 255, 0), 2);
    
    video[0] = frame1;         //imshow("Frame1", frame1);
    glutPostRedisplay();
    
}
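
// A minimal sketch (an assumption, not the original implementation) of a
// speedGovernor-style helper as used above: step from `from` toward `to` by
// at most maxStep pixels per call. Assumes <cmath>.
static Point speedGovernorSketch(Point from, Point to, double maxStep){
    double dx = to.x - from.x;
    double dy = to.y - from.y;
    double dist = sqrt(dx*dx + dy*dy);
    if(dist <= maxStep) return to;               // close enough: snap to the target
    double scale = maxStep / dist;               // shrink the step to maxStep
    return Point(cvRound(from.x + dx*scale), cvRound(from.y + dy*scale));
}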
Example No. 6
int SubsExtractor::run()
{

	namedWindow("Control", CV_WINDOW_AUTOSIZE);
    //createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), (void (*)(int,void *))&SubsExtractor::onSFtb, 0);
	createTrackbar("SF", "Control", &StartFrame, cap->get(CV_CAP_PROP_FRAME_COUNT), onSFtb, this);
	createTrackbar("EF", "Control", &EndFrame, 
                   cap->get(CV_CAP_PROP_FRAME_COUNT), 0, 0);
    createTrackbar("T1", "Control", &th1,255,NULL,0);
    createTrackbar("T2", "Control", &th2,255,NULL,0);

	int xmax = (int)cap->get(CV_CAP_PROP_FRAME_WIDTH);
	int ymax = (int)cap->get(CV_CAP_PROP_FRAME_HEIGHT);
	int x = xmax/2 - 50; int y = ymax - 110;
	int xw = 100; int yh = 100;
	// 800x90+240+590 convert
	// (240,590) -> (240+800,590+90) = (1040,680)
	fprintf(stderr,"FRAME (%d %d) -> (%d %d)\n", x, y, x+xw, y+yh);
	fprintf(stderr,"STARTFRAME %d ENDFRAME %d\n", StartFrame, EndFrame);

	int subs;
	int frame = 0;
	char subtext[1024] = "";
	char same[] = " .   ";
	string f;
	char chronline[500];
	while(true) {
		if(!cap->read(img)) {
			cout << "Cannot read a frame from video stream" << endl;
			break;
		}
		if((frame = cap->get(CV_CAP_PROP_POS_FRAMES)) >= EndFrame) {
			cout << "Beyond EndFrame" << endl;
			break;
		}
		//fprintf(stderr, "%d\r", frame);
		subs = haysubs(x, x + xw, y, y + yh);
		fprintf(stderr, "subs %d\n", subs);
		switch(subs) {
			case SAME:
				//fprintf(stderr,"%s           \r", same + frame % 4);
				break;
			case START:
				if(ocr(subtext))
					setchron(cap->get(CV_CAP_PROP_POS_MSEC));
				//fprintf(stderr, "STR frame %ld\n",frame);
				break;
			case END:
				//fprintf(stderr,"END\n");
				getchron(cap->get(CV_CAP_PROP_POS_MSEC), chronline);
				printf("%s\n%s\n\n", chronline, subtext);
				//fprintf(stderr, "END frame %d %s\n",frame,subtext);
				break;
			case CHANGE:
				//fprintf(stderr,"CHANGE\n");
				//string s = getchron();
				//setchron(cap->get(CV_CAP_PROP_POS_MSEC));
				//imwrite(f, img);
				//chron = cap->get(CV_CAP_PROP_POS_MSEC);
				//intchron(CHRON_START,chron);
				//fprintf(stderr, "CHG frame %d\n",frame);
				break;
			default:
				fprintf(stderr,"ERROR SUBS\n");
		}
		if (waitKey(30) == 27) {
			cout << "esc key pressed by user" << endl;
			break; 
		}
	}
	return 0;
}
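
// A minimal sketch (an assumption, not the original code) of what a
// haysubs()-style classifier for the subtitle region could look like:
// binarize the ROI with th1, count bright pixels, and compare against the
// previous frame's count (gated by th2) to pick SAME / START / END / CHANGE.
static int haysubs_sketch(const Mat &img, int x1, int x2, int y1, int y2, int th1, int th2)
{
	Mat roi, bin;
	cvtColor(img(Rect(x1, y1, x2 - x1, y2 - y1)), roi, CV_BGR2GRAY);
	threshold(roi, bin, th1, 255, THRESH_BINARY);
	int bright = countNonZero(bin);              // bright pixels in the subtitle box

	static int prevBright = 0;
	bool now = bright > th2;                     // subtitle visible in this frame?
	bool before = prevBright > th2;              // ... and in the previous one?
	int delta = abs(bright - prevBright);
	prevBright = bright;

	if(now && !before) return START;
	if(!now && before) return END;
	if(now && before && delta > th2) return CHANGE;
	return SAME;
}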