Example #1
int main(int argc, char* argv[])
{
    VideoCapture cap;
    VideoWriter output;
    string inFile = "earth_4_orig.mov";
    Mat frame1, frame2, NewFrame;
    int ver = 2;
    int hor = 2;
    int frameCount = 1;
    bool quietMode = false;
    bool reportMode = false;
    bool displayMode = false;

    if(argc > 1)
    {
        for(int i = 1; i < argc; ++i)
        {
            if(strcmp(argv[i], "-f") == 0)
            {
                inFile = string(argv[++i]);
            }
            else if(strcmp(argv[i], "-v") == 0)
            {
                ver = atoi(argv[++i]);
            }
            else if(strcmp(argv[i], "-h") == 0)
            {
                hor = atoi(argv[++i]);
            }
            else if(strcmp(argv[i], "-q") == 0)
            {
                quietMode = true;
            }
            else if(strcmp(argv[i], "-r") == 0)
            {
                reportMode = true;
            }
            else if(strcmp(argv[i], "-d") == 0)
            {
                displayMode = true;
            }
            else
            {
                cout << "Invalid argument " << argv[i] << endl;
                printUsage();
            }
        }
    }
    else
    {
        printUsage();
        return -1;
    }

    cap.open(inFile);
    if(!cap.isOpened())
    {
        printf("!!! failed to open input video (file not found?)\n");
        return -1;
    }

    int maxFrame = cap.get(CV_CAP_PROP_FRAME_COUNT);
    int origWid = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    int origHei = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    Size S = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH) - ver, (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT) - hor);
    //char key = 0;
    int first = 1;
    int last = 0;
    NewFrame = Mat::zeros(S, CV_32F);
    string::size_type pAt = inFile.find_last_of('.');   // Find extension point
    const string outFile = inFile.substr(0, pAt) + "-basic.mov";
    output.open(outFile, ex, cap.get(CV_CAP_PROP_FPS), S, true);

    clock_t startTime = clock();

    if(quietMode == false)
        cout << "Processing " << maxFrame << " frames..." << endl;

    //int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    while (/*key != 'q' && */ !last)
    {
        if(first ==1 )
        {
            cap >> frame1;
            if (frame1.empty())
            {
                printf("!!! cvQueryFrame failed: no frame\n");
                break;
            }
            first = 0;
            continue;
        }
        else
        {
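            // --- The example is truncated at this point in the source. ---
            // Below is a minimal sketch of how such a read-process-write loop
            // is commonly finished; the cropping step is an assumption for
            // illustration, not the original author's processing code.
            cap >> frame2;
            if (frame2.empty())
            {
                last = 1;                       // end of stream
                continue;
            }
            // Hypothetical step: crop to the output size S and write the frame.
            NewFrame = frame2(Rect(0, 0, S.width, S.height)).clone();
            output << NewFrame;
            frame1 = frame2.clone();            // keep the previous frame around
            ++frameCount;
            if (frameCount >= maxFrame)
                last = 1;
        }
    }

    if (!quietMode)
        cout << "Done in " << double(clock() - startTime) / CLOCKS_PER_SEC << " s" << endl;
    return 0;
}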
Example #2
int main(int argc, const char** argv)
{
    VideoCapture vc;
    std::vector<char> s(4096);

    if (!vc.open(VIDEO_FILE)) {
        CV_Error_(-1, ("failed to open video: \"%s\"", VIDEO_FILE));
        std::exit(1);
    }

    int key = 0;
    bool pause = false;
    Point selection(-1000, -1000);
    Mat pristine, a, b;

    namedWindow(WINDOW_NAME, CV_WINDOW_NORMAL);
    resizeWindow(WINDOW_NAME,
                 (int)vc.get(CV_CAP_PROP_FRAME_WIDTH),
                 (int)vc.get(CV_CAP_PROP_FRAME_HEIGHT));
    setMouseCallback(WINDOW_NAME, onMouse, &selection);

    while (true)
    {
        if ((unsigned long) cvGetWindowHandle(WINDOW_NAME) == 0UL) {
            break;
        }

        if (!pause) {
            if (!vc.read(pristine)) {
                vc.set(CV_CAP_PROP_POS_FRAMES, 0U);   // loop back to the start of the video
                vc.read(pristine);
            }
        }

        pristine.copyTo(a);

        Mat& post = a;

        rectangle(post,
                  selection - SELECT_HALF_SIZE,
                  selection + SELECT_HALF_SIZE,
                  Scalar(0,255,0), SELECT_LINE_WIDTH);

        std::sprintf(&s[0], "CNT: %5u", (unsigned) vc.get(CV_CAP_PROP_FRAME_COUNT));
        putText(post,
                &s[0],
                Point(vc.get(CV_CAP_PROP_FRAME_WIDTH)-200,TEXT_LINE_PITCH * 1),
                FONT_HERSHEY_PLAIN,
                1,
                Scalar(255,255,255));

        std::sprintf(&s[0], "F#:  %5u", (unsigned) vc.get(CV_CAP_PROP_POS_FRAMES));
        putText(post,
                &s[0],
                Point(vc.get(CV_CAP_PROP_FRAME_WIDTH)-200,TEXT_LINE_PITCH * 2),
                FONT_HERSHEY_PLAIN,
                1,
                Scalar(255,255,255));

        imshow(WINDOW_NAME, post);

        key = waitKey(1);

        if (key == 27) {
            break;
        }
        else if (key == 32) {
            pause = !pause;
        }

        if (key != -1) {
            std::cerr << "key=" << key << std::endl;
        }
    }

    vc.release();

    return 0;
}
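
Example #2 registers an onMouse callback for WINDOW_NAME but never shows it. A minimal sketch of what that handler could look like, assuming the userdata pointer is the address of the Point named selection and that a left click simply moves the selection (the event handling here is an assumption, not the original code):

static void onMouse(int event, int x, int y, int /*flags*/, void* userdata)
{
    // userdata was passed as &selection in main()
    Point* selection = static_cast<Point*>(userdata);
    if (event == CV_EVENT_LBUTTONDOWN)
        *selection = Point(x, y);   // remember the last clicked position
}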
Example #3
void skizImage::FrameMessage(VideoCapture& cap)   // pass by reference: avoid copying the capture object
{
	printf("MSEC = %lfs\t", cap.get(property(0)) / 1000);  // property(0): CV_CAP_PROP_POS_MSEC, shown in seconds
	printf("FRAMES = %lf\t", cap.get(property(1)));        // property(1): CV_CAP_PROP_POS_FRAMES
	printf("AVI_RATIO = %lf\n", cap.get(property(2)));     // property(2): CV_CAP_PROP_POS_AVI_RATIO
}
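
The property(n) casts in Example #3 map small integers onto the capture-property enum. For reference, the same report written with the named OpenCV constants might look like this sketch (FrameMessageNamed is a hypothetical free function, not part of the original skizImage class):

void FrameMessageNamed(VideoCapture& cap)
{
    printf("MSEC = %lfs\t", cap.get(CV_CAP_PROP_POS_MSEC) / 1000);    // current position in seconds
    printf("FRAMES = %lf\t", cap.get(CV_CAP_PROP_POS_FRAMES));        // 0-based index of the next frame
    printf("AVI_RATIO = %lf\n", cap.get(CV_CAP_PROP_POS_AVI_RATIO));  // relative position in [0,1]
}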
Example #4
int main(int argc, char** argv)
{
	// variable initialization
	int keyInput = 0;
	int nFrames = 0, nSmoothFrames = 0, nFailedFrames = 0, nBlindFrames = 0;
	int lastDx = 0, lastDy = 0;
	
	bool bOverlay = true;			// plot overlay?
	bool bTrace = true & bOverlay;	// plot 'bubble' trace? (only when overlay active)
	
	Ptr<BackgroundSubtractor> pMOG2;

	VideoCapture capture;		// input video capture
	VideoWriter outputVideo;	// output video writer

	Mat curFrame,		// current original frame
		fgMaskMOG2,		// foreground mask from MOG2 algorithm
		bgImg,			// container for background image from MOG2
		grayFrame,		// grayscale conversion of original frame
		frameDil,		// dilated grayscale frame
		canny_out;		// output of Canny algorithm for shape outline detection

	Mat *pOutMat = &curFrame;	// pointer to image that will be rendered once per input video frame
	Mat strucElem = getStructuringElement(MORPH_RECT, Size(3, 3)); // dilatation base element

	// containers for output of findContours()
	vector<Mat> contours;
	vector<Vec4i> hierarchy;
	
	// read video input filename from command line and construct output filename
	if (argc < 2) {
		cerr << "Please provide input video filename." << endl;
		return EXIT_FAILURE;
	}
	string filename(argv[1]);
	string outName = filename.substr(0, filename.length() - 4) + "_out.avi";

	Rect lastKnownRect, lastRect;
	Point lastKnownPos, lastPos, estimatePos, plotPos;
	list<Point> lastKnownPositions;

	// init 'live' video output window
	namedWindow("Motion tracking");

	// try to open input file
	capture.open(filename);
	if (!capture.isOpened()) {
		cerr << "Unable to open file '" << filename << "'." << endl;
		return EXIT_FAILURE;
	} else	{
		cout << "Successfully opened file '" << filename << "'." << endl;
	}

	// try to write to output file
	Size vidS = Size((int)capture.get(CV_CAP_PROP_FRAME_WIDTH), (int)capture.get(CV_CAP_PROP_FRAME_HEIGHT));
	outputVideo.open(outName, CV_FOURCC('P','I','M','1'), capture.get(CV_CAP_PROP_FPS), vidS, true);
	if (!outputVideo.isOpened()) {
		cerr << "Unable to write to output video." << endl;
		return EXIT_FAILURE;
	}

	// build frame buffer and background subtractor
	pMOG2 = createBackgroundSubtractorMOG2(500, 30., true);
	
	// main loop over frames
	while (capture.read(curFrame) && (char)keyInput != 'q')
	{
		++nFrames;
		
		cvtColor(curFrame, grayFrame, CV_BGR2GRAY);	// convert to grayscale
		threshold(grayFrame, grayFrame, 128., 0., CV_THRESH_TRUNC); // try to mitigate (white) reflections by truncating the current frame
		GaussianBlur(grayFrame, grayFrame, Size(7, 7), 0, 0);

		pMOG2->apply(grayFrame, fgMaskMOG2);
		
		// erode and dilate to remove some noise
		erode(fgMaskMOG2, frameDil, strucElem);
		dilate(frameDil, frameDil, strucElem);

		// dilate and erode to remove holes from foreground
		dilate(frameDil, frameDil, strucElem);
		erode(frameDil, frameDil, strucElem);

		// canny to find foreground outlines
		Canny(frameDil, canny_out, 100, 200, 3);

		// find contours, sort by contour size (descending)
		findContours(canny_out, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, Point(0, 0)); // find contours
		sort(contours.begin(), contours.end(), rvs_cmp_contour_area); // sort by contour area, beginning with the largest

		// determine largest "moving" object
		int iMaxSize = 0;
		bool bFoundCloseContour = false;
		for (unsigned int i = 0; i < contours.size(); i++)
		{
			if (contourArea(contours[i]) < CONTOUR_AREA_THRESH) // ignore contours which are too small (noise)
				break;

			// ignore contours which are too far away from the last frame
			Rect boun = boundingRect(contours[i]); // bounding rect
			Point bounCenter = (boun.tl() + boun.br())/2;

			if (i == 0) // preemptively save largest contour to get back to if no "close" contour is found.
			{
				lastRect = boun;
				lastPos = bounCenter;
			}

			// distance validity check, but only if we recently had track of the object
			if (nFrames > 1 && nFailedFrames < 10)
			{
				int dx = bounCenter.x - lastPos.x;
				int dy = bounCenter.y - lastPos.y;
				int dist2 = dx*dx + dy*dy;
				//cout << bounCenter << " " << lastPos << endl;
				if (dist2 > DELTA_SQ_THRESH) // too far away... try next contour
					continue;
			}

			lastRect = boun;
			lastPos = bounCenter;
			bFoundCloseContour = true;
			++nSmoothFrames;
			break;
		}

		if (contours.size() == 0) {
			// we don't see anything.
			++nBlindFrames;
		} else { nBlindFrames = 0; }

		// update last known position if smooth transition occured
		if (bFoundCloseContour) {
			nFailedFrames = 0;
			lastDx = lastPos.x - lastKnownPos.x;
			lastDy = lastPos.y - lastKnownPos.y;

			lastKnownRect = lastRect;
			lastKnownPos = lastPos;

			plotPos = lastKnownPos;

			if (bTrace) { // draw trace
				if (lastKnownPositions.size() > LAST_POS_BUFFER_SIZE)
					lastKnownPositions.pop_front();
				lastKnownPositions.push_back(lastPos);
				
				list<Point>::iterator it;
				int i = 0;
				for (it = lastKnownPositions.begin(); it != lastKnownPositions.end(); it++)	{
					Scalar color(180, 90, 30);
					circle(*pOutMat, *it, 5, color, 2 * i);
					++i;
				}
			}
		} else {
			++nFailedFrames;
			// guess based on velocity extrapolation
			estimatePos.x = lastKnownPos.x + nFailedFrames*lastDx;
			estimatePos.y = lastKnownPos.y + nFailedFrames*lastDy;

			if (estimatePos.x < 0 || estimatePos.y < 0 || estimatePos.x >= capture.get(CV_CAP_PROP_FRAME_WIDTH) ||
				estimatePos.y >= capture.get(CV_CAP_PROP_FRAME_HEIGHT || nFailedFrames >= 10)) {
				// we've totally lost track, cancel velocity extrapolation guess
				plotPos = lastKnownPos;
				nFailedFrames = 0;
			} else {
				plotPos = estimatePos;
			}
		}

		// draw overlay (rect frame, mid point and text)
		if (bOverlay) {
			if (nBlindFrames < 6 && bFoundCloseContour) {
				circle(*pOutMat, plotPos, 5, Scalar(255, 120, 0), 10, 8);
				rectangle(*pOutMat, lastKnownRect, Scalar(0, 255, 0), 3);
			}

			vector<ostringstream> text(4);
			const int lineSkip = 16;
			text[0] << "Frame: " << nFrames; // frame counter
			text[1] << "Object X: " << lastKnownPos.x; // moving object coordinates
			text[2] << "Object Y: " << lastKnownPos.y;
			text[3] << "Smooth rate: " << setprecision(3) << 100.0*nSmoothFrames / nFrames << "%"; // tracking percentage

			for (unsigned int line = 0; line < text.size(); line++) {
				putText(*pOutMat, text[line].str(), Point(10, 22 + line*lineSkip), CV_FONT_HERSHEY_PLAIN, 1., Scalar(180., 0., 0.));
			}
		}
		
		// cleanup temporary vectors (VS2013 stability issues)
		contours.clear();
		hierarchy.clear();

		outputVideo << *pOutMat; // add output video frame
		imshow("Motion tracking", *pOutMat); // draw frame
		keyInput = waitKey(5); // allow time for event loop
	}

	// release files
	outputVideo.release(); 
	capture.release();

	return EXIT_SUCCESS;
}
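
Example #4 sorts the contours with a comparator named rvs_cmp_contour_area that is not shown in the source. A minimal sketch of what it presumably does, ordering contours by area with the largest first ("rvs" suggesting reversed order):

static bool rvs_cmp_contour_area(const Mat& a, const Mat& b)
{
    // std::sort with this predicate puts larger contours before smaller ones.
    return contourArea(a) > contourArea(b);
}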
Example #5
/**
 * @brief main function
 *
 * @return 0
 */
int main(void)
{
    HelpMain();

    HelpSelectCamera();
    
    VideoCapture cap;
    if (camera_device_index != -1) {
        /// open camera
        cap = VideoCapture(camera_device_index);
    } else {
        /// open video
        cout << "please input video name with pathname: ";
        String video_name;
        cin >> video_name;
        cout << "the video name is :" << video_name << endl;
        cap = VideoCapture(video_name);
    }
    if(!cap.isOpened()) {
        cout << "Can not find the default camera from you computer!\n";
        cin.get();
        return -1;
    }

    /// wait for camera to get ready
    waitKey(2000);

    /// video information
    totalFrameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);
    frameToStart = 30;
    frameToStop = 140;
    rate = cap.get(CV_CAP_PROP_FPS);
    currentFrame = frameToStart;
    delay = 1000/rate;
    if (camera_device_index == -1) {
        cap.set( CV_CAP_PROP_POS_FRAMES,frameToStart);
    }
    /// read a frame to get the camera image state
    cap.read(frame);
    resize(frame, frame, Size(640,480));
    cout << "image height = " << frame.rows << endl;
    cout << "image width = " << frame.cols << endl;
    cout << "image channel = " << frame.channels() << endl;
    imshow("camera", frame);
    cout << "camera/video open success\n";
    waitKey(30);

    HelpCaptureImage();

    /// start show the camera
    char key = -1;
    for(;;) {
        Mat frame;
        cap.read(frame);
        frame = vehicle_detection_system(frame);
        currentFrame++;
        imshow("camera", frame);
        key = waitKey(30);
        /// exit program
        if(key == 27) {
            destroyAllWindows();
            break;
        }
        /// capture image
        if(key == 'S' || key == 's') {
            capture_color_image = frame.clone();
            cvtColor(capture_color_image, capture_gray_image,
                    cv::COLOR_BGR2GRAY);
            // imshow("color", capture_color_image);
            // imshow("gray", capture_gray_image);
            // imwrite("color.bmp", capture_color_image);
            // imwrite("gray.bmp", capture_gray_image);
        }
    }
    cap.release();
    return 0;
}
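
Example #5 reads frameToStop but never uses it, so playback from a file does not actually stop at frame 140. If that is the intent, the display loop could be bounded like this sketch (reusing the example's globals; the capture/detection handling is omitted for brevity):

// Sketch: stop file playback at frameToStop, pacing by the source frame rate.
while (currentFrame <= frameToStop) {
    Mat frame;
    if (!cap.read(frame))
        break;                    // end of stream
    imshow("camera", frame);
    currentFrame++;
    if (waitKey(delay) == 27)     // delay = 1000/rate, as computed above
        break;
}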
Example #6
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	int rad = running_average_background.depth();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
    Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))//1800))
    {
 		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Skip every second frame so the flow is greater.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				sprintf( frame_str, "Frame = %d", frame_count);
 				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}
	
		// Static background image
		Mat difference_frame, binary_difference;
		Mat structuring_element(3,3,CV_8U,Scalar(1));
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,binary_difference,MORPH_OPEN,structuring_element);
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		else
		{
			binary_difference.setTo(Scalar(0,0,0));
		    current_frame.copyTo(binary_difference, thresholded_image);
		}
		timer->recordTime("Static difference");

		// Running Average (three channel version)
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		split(running_average_difference,running_average_planes);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
    	invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
 		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
 		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
        gmm->apply(current_frame, foreground_mask);
		// Clean the resultant binary (moving pixel) mask using an opening.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
 		foreground_image.setTo(Scalar(0,0,0));
        current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
        Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
        gmm->getBackgroundImage(mean_background_image);

		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		char c = cvWaitKey(delay);
		
		char frame_str[100];
		sprintf( frame_str, "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
        imshow("Static Background Model", static_output );
 		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
 		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
        imshow("Selective Running Average Background Model", selective_output );
 		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
        imshow("Median Background Model", median_output );
  		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
        imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
	 	surveillance_video >> current_frame;
	}
	cvDestroyAllWindows();
}
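
The selective update in Example #6 is the core of that background model: background pixels are accumulated at the full learning rate while foreground pixels are accumulated at a reduced rate, so moving objects are absorbed into the background only slowly. Stripped of the surrounding demo code, the pattern looks like this sketch (single-channel version; the threshold of 30 matches the example):

// Sketch: selective running-average background update (single channel).
// `frame` is CV_8U, `background` is CV_32F, `alpha` is the learning rate.
void updateSelectiveBackground(const Mat& frame, Mat& background, double alpha)
{
    Mat background8u, diff, background_mask, foreground_mask;
    background.convertTo(background8u, CV_8U);
    absdiff(background8u, frame, diff);
    // Points that changed little are background (inverse threshold).
    threshold(diff, background_mask, 30, 255, THRESH_BINARY_INV);
    // Full learning rate on background points...
    accumulateWeighted(frame, background, alpha, background_mask);
    // ...and a reduced rate on foreground points.
    bitwise_not(background_mask, foreground_mask);
    accumulateWeighted(frame, background, alpha / 3.0, foreground_mask);
}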
Example #7
File: video.hpp  Project: verzhak/stend
		inline double fps() { return video.get(CV_CAP_PROP_FPS); };
Example #8
int main(int argc, char *argv[]) {
	ros::init(argc, argv, "verify_tracking_node");
	ros::NodeHandle n;
	std::string port;
	ros::param::param<std::string>("~port", port, "/dev/ttyACM0");
	int baud;
	ros::param::param<int>("~baud", baud, 57600);
	ros::Rate loop_rate(10);

	ros::Publisher servox_pub = n.advertise<std_msgs::Char>("servox_chatter", 1000);
	ros::Publisher servoy_pub = n.advertise<std_msgs::Char>("servoy_chatter", 1000);
	ros::Publisher motor_pub = n.advertise<std_msgs::Char>("motor_chatter", 1000);

	ros::Publisher verify_pub = n.advertise<std_msgs::Char>("verify_chatter", 1);

	ros::Subscriber track_sub = n.subscribe("track_chatter", 1, trackCallback);
	ros::Subscriber host_sub = n.subscribe("host_chatter", 1, hostCallback);
	
	cv_result_t cv_result = CV_OK;
	int main_return = -1;
	cv_handle_t handle_detect = NULL;
	cv_handle_t handle_track = NULL;
	VideoCapture capture;
	double time;
	capture.open(0);         // open the camera
	if (!capture.isOpened()) {
		fprintf(stderr, "Verify track can not open camera!\n");
		return -1;
	}
	capStatus = OPEN;
	int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
	int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    int frame_half_width = frame_width >> 1;
	int frame_half_height = frame_height >> 1;
	//printf("width %d height %d \n", frame_width, frame_height);
	Point expect(frame_half_width , frame_half_height);
	struct timeval start0, end0;
	struct timeval start1, end1;
	struct timeval start2, end2;
	struct timeval start3, end3;
	struct timeval start4, end4;
	struct timeval start5, end5;
#ifdef TIME
		gettimeofday(&start0, NULL);
#endif
	cv_handle_t handle_verify = cv_verify_create_handle("data/verify.tar");
#ifdef TIME
		gettimeofday(&end0, NULL);
		time = COST_TIME(start0, end0);
		printf("get from verify tar time cost = %.2fs \n", time / 1000000);
#endif
#if 1
	const int person_number = 5;
	Mat p_image_color_1[person_number], p_image_color_color_1[person_number], p_image_color_2, p_image_color_color_2;
	Mat tmp_frame;
	cv_face_t *p_face_1[person_number];
	cv_face_t *p_face_2;
	int face_count_1[person_number] = {0};
	int face_count_2 = 0;
	cv_feature_t *p_feature_1[person_number];
	cv_feature_t *p_feature_new_1[person_number];
	unsigned int feature_length_1[person_number];
	p_image_color_1[0] = imread("00.JPG");
	p_image_color_1[1] = imread("01.JPG");
	p_image_color_1[2] = imread("02.JPG");
	p_image_color_1[3] = imread("03.JPG");
	p_image_color_1[4] = imread("04.JPG");
	char *string_feature_1[person_number];
#else
	Mat p_image_color_2, p_image_color_color_2;

	const int person_number = 4;
	cv_face_t *p_face_2 = NULL;
	vector<cv_face_t *>p_face_1(person_number,NULL);
	vector<int>face_count_1(person_number, 0);
	int face_count_2 = 0;
	vector<Mat>p_image_color_1(person_number);
	vector<Mat>p_image_color_color_1(person_number);
	vector<cv_feature_t *>p_feature_1(person_number, NULL);
	vector<cv_feature_t *>p_feature_new_1(person_number, NULL);
	vector<unsigned int>feature_length_1(person_number, 0);
	// load image
	p_image_color_1.push_back(imread("01.JPG"));
	p_image_color_1.push_back(imread("02.JPG"));
	p_image_color_1.push_back(imread("03.JPG"));
	p_image_color_1.push_back(imread("04.JPG"));
	char *string_feature_1[person_number];
#endif

	for(int i = 0; i < person_number; i++)
	{
		if (!p_image_color_1[i].data ) {
			fprintf(stderr, "fail to read %d image \n", i);
			//return -1;
			goto RETURN;
		}
	}
	for(int i = 0; i < person_number; i++)
		cvtColor(p_image_color_1[i], p_image_color_color_1[i], CV_BGR2BGRA);
	// init detect handle
	handle_detect = cv_face_create_detector(NULL, CV_FACE_SKIP_BELOW_THRESHOLD | CV_DETECT_ENABLE_ALIGN);
	if (!handle_detect) {
		fprintf(stderr, "fail to init detect handle\n");
		goto RETURN;
		//return -1;
	}
	// detect
#ifdef TIME
		gettimeofday(&start1, NULL);
#endif
	for(int i = 0; i < person_number; i++)
		cv_result = cv_face_detect(handle_detect, p_image_color_color_1[i].data, CV_PIX_FMT_BGRA8888,
			p_image_color_color_1[i].cols, p_image_color_color_1[i].rows, p_image_color_color_1[i].step,
			CV_FACE_UP, &p_face_1[i], &face_count_1[i]);
#ifdef TIME
		gettimeofday(&end1, NULL);
		time = COST_TIME(start1, end1);
		printf("face detect from db time cost = %.2fs \n", time / 1000000);
#endif
	if (cv_result != CV_OK) {
		fprintf(stderr, "st_face_detect error : %d\n", cv_result);
		goto RETURN;
		//return -1;
	}
	for(int i = 0; i < person_number; i++)
	{
		if(face_count_1[i] == 0){
			fprintf(stderr, "can't find face in db %d", i);
			goto RETURN;
		}
	}
	if (handle_verify) {
#ifdef TIME
		gettimeofday(&start2, NULL);
#endif
		
	for(int i = 0; i < person_number; i++)
		cv_result = cv_verify_get_feature(handle_verify, p_image_color_color_1[i].data, CV_PIX_FMT_BGRA8888,
				p_image_color_color_1[i].cols,
				p_image_color_color_1[i].rows, p_image_color_color_1[i].step, p_face_1[i], &p_feature_1[i],
				&feature_length_1[i]);
#ifdef TIME
		gettimeofday(&end2, NULL);
		time = COST_TIME(start2, end2);
		printf("get feature from db time cost = %.2fs \n", time / 1000000);
#endif
	}
	else {
		fprintf(stderr, "fail to init verify handle, check for the model file!\n");
		goto RETURN;
	}
	for(int i = 0; i < person_number; i++)
	{
		if (feature_length_1[i] > 0) {
			cv_feature_header_t *p_feature_header = CV_FEATURE_HEADER(p_feature_1[i]);
			fprintf(stderr, "Feature information:\n");
			fprintf(stderr, "    ver:\t0x%08x\n", p_feature_header->ver);
			fprintf(stderr, "    length:\t%d bytes\n", p_feature_header->len);

			// test serial and deserial
			string_feature_1[i] = new char[CV_ENCODE_FEATURE_SIZE(p_feature_1[i])];
			cv_verify_serialize_feature(p_feature_1[i], string_feature_1[i]);
			p_feature_new_1[i] = cv_verify_deserialize_feature(string_feature_1[i]);
			delete []string_feature_1[i];
		}
		else {
			fprintf(stderr, "error, the feature length [%d]is 0!\n", i);
		}
	}
	handle_track = cv_face_create_tracker(NULL, CV_FACE_SKIP_BELOW_THRESHOLD);
	if (!handle_track) {
		fprintf(stderr, "fail to init track handle\n");
		goto RETURN;
	}
	//namedWindow("TrackingTest");
	//while (capture.read(p_image_color_2)) {
	
	while(capture.isOpened()) {
		for(int i = 0; i < 2; i++)
		{
			capture.read(tmp_frame);
		}
        tmp_frame.copyTo(p_image_color_2); 
		resize(p_image_color_2, p_image_color_2, Size(frame_width, frame_height), 0, 0, INTER_LINEAR);
		cvtColor(p_image_color_2, p_image_color_color_2, CV_BGR2BGRA);

#ifdef TIME
			gettimeofday(&start3, NULL);
#endif
			//printf("begin to detect from camera\n");
		cv_result = cv_face_detect(handle_detect, p_image_color_color_2.data, CV_PIX_FMT_BGRA8888,
				p_image_color_color_2.cols, p_image_color_color_2.rows, p_image_color_color_2.step,
				CV_FACE_UP, &p_face_2, &face_count_2);
#ifdef TIME
			gettimeofday(&end3, NULL);
			time = COST_TIME(start3, end3);
			printf("face detect from camera time cost = %.2fs \n", time / 1000000);
#endif
		if (cv_result != CV_OK) {
			fprintf(stderr, "st_face_detect error : %d\n", cv_result);
			goto RETURN;
		}
		spinOnce();

		if(host_flag == '0')
		{
			printf("host_flag = %c\n", host_flag);
			continue;
		}
		else
			printf("host_flag = %c\n", host_flag);

		// verify the first face
		if (face_count_2 > 0) {
			cv_feature_t *p_feature_2 = NULL;
			vector<float>score(person_number, 0);
			unsigned int feature_length_2;
			// get feature
			//printf("begin to get feature from camera\n");
#ifdef TIME
			gettimeofday(&start4, NULL);
#endif
			printf("begin to get feataure from camera\n");
			cv_result = cv_verify_get_feature(handle_verify, p_image_color_color_2.data, CV_PIX_FMT_BGRA8888,
					p_image_color_color_2.cols,
					p_image_color_color_2.rows, p_image_color_color_2.step, p_face_2, &p_feature_2,
					&feature_length_2);
#ifdef TIME
			gettimeofday(&end4, NULL);
			time = COST_TIME(start4, end4);
			printf("get feature from camera time cost = %.2fs \n", time / 1000000);
#endif

			if ( feature_length_2 > 0) {
				char *string_feature_2 = new char[CV_ENCODE_FEATURE_SIZE(p_feature_2)];
				cv_verify_serialize_feature(p_feature_2, string_feature_2);
				cv_feature_t *p_feature_new_2 = cv_verify_deserialize_feature(string_feature_2);
				delete []string_feature_2;

				// compare feature
#ifdef TIME
				gettimeofday(&start5, NULL);
#endif
				printf("begin to compare feature with db\n");
				for(int i = 0; i < person_number; i++)
				{
					cv_result = cv_verify_compare_feature(handle_verify, p_feature_new_1[i],
						p_feature_new_2, &score[i]);
				}
#ifdef TIME
				gettimeofday(&end5, NULL);
				time = COST_TIME(start5, end5);
				printf("compare feature time cost = %.2fms \n", time / 1000);
#endif
				if (cv_result == CV_OK) {
					float max_score = score[0];
					int max_id = 0;
					for(int i = 1; i < person_number; i++)
					{
						if(score[i] > max_score)
						{
							max_score = score[i];
							max_id = i;
						}
					}

					fprintf(stderr, "max score: %f\n", max_score);
					// compare score with DEFAULT_THRESHOLD
					// > DEFAULT_THRESHOLD => the same person
					// < DEFAULT_THRESHOLD => different people
					if (max_score > DEFAULT_THRESHOLD)
					{
						fprintf(stderr, "you are the right person, your number is %d\n", max_id);
						capStatus = Verified;
						// send verify_flag msg to verify chatter
						verify_flag = '1';
						verify.data = verify_flag;
						verify_pub.publish(verify);
						//printf("verify node publish verify flag %c to speech node\n", verify_flag);
						spinOnce();
						printf("track flag %c\n", track_flag);
						if(track_flag == '0')
							continue;
						int track_value = face_track(capture, expect, frame_width, frame_height, handle_track, servox_pub, servoy_pub,motor_pub);
						if(track_value == -1)
						{
							printf("no face detected !, verified frome start!\n");
							verify_flag = '0';
							verify.data = verify_flag;
							verify_pub.publish(verify);
							track_flag = '0';
							host_flag = '0';
							continue;
						}
					}
					else
					{
						fprintf(stderr, "no you are not right person .\n");
						verify_flag = '0';
						verify.data = verify_flag;
						verify_pub.publish(verify);
						track_flag = '0';
						host_flag = '0';
						continue;
					}
				} else {
					fprintf(stderr, "cv_verify_compare_feature error : %d\n", cv_result);
				}
				cv_verify_release_feature(p_feature_new_2);
			} else {
				fprintf(stderr, "error, the feature length is 0!\n");
			}
			cv_verify_release_feature(p_feature_2);
		} else {
			fprintf(stderr, "no face in camera\n");
			verify_flag = '0';
			verify.data = verify_flag;
			verify_pub.publish(verify);
			track_flag = '0';
			host_flag = '0';
			printf("verify_flag = %c, host_flag = %c\n", verify_flag, host_flag);
			continue;
		}
#if 0
		Scalar scalar_color = CV_RGB(p_face[i].ID * 53 % 256,
				p_face[i].ID * 93 % 256,
				p_face[i].ID * 143 % 256);
		rectangle(temp_frame, Point2f(static_cast<float>(p_face[i].rect.left),
					static_cast<float>(p_face[i].rect.top)),
				Point2f(static_cast<float>(p_face[i].rect.right),
					static_cast<float>(p_face[i].rect.bottom)), scalar_color, 2);
#endif
#if 0
		imshow("TrackingTest", p_image_color_2);
		if (waitKey(5) == 27 )
			break;
#endif
		// release the memory of feature

	}
	for(int i = 1; i < person_number; i++)
	{
		cv_verify_release_feature(p_feature_new_1[i]);
		cv_verify_release_feature(p_feature_1[i]);
	}
	// destroy verify handle
	cv_verify_destroy_handle(handle_verify);
RETURN:
	// release the memory of face
	for(int i = 1; i < person_number; i++)
		cv_face_release_detector_result(p_face_1[i], face_count_1[i]);
	cv_face_release_detector_result(p_face_2, face_count_2);
	// destroy detect handle
	cv_face_destroy_detector(handle_detect);

	fprintf(stderr, "test finish!\n");
	return main_return;
}
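
Example #8 subscribes to track_chatter and host_chatter with callbacks that are not shown in the source. A minimal sketch of what they presumably do, latching the received flag bytes into the global host_flag / track_flag variables that the main loop polls after spinOnce() (the globals and the message type are assumptions based on how they are used):

void hostCallback(const std_msgs::Char::ConstPtr& msg)
{
    host_flag = msg->data;    // '0' = no host request, otherwise host present
}

void trackCallback(const std_msgs::Char::ConstPtr& msg)
{
    track_flag = msg->data;   // '0' = tracking off, otherwise start tracking
}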
Example #9
int main(int argc, char** argv){
  Mat image;
  int width, height;
  VideoCapture cap;
    
  vector<Mat> planes;
  Mat histR, histG, histB;
  int nbins = 64;
  float range[] = {0, 256};
  const float *histrange = { range };
  bool uniform = true;
  bool accumulate = false;

  cap.open(0);  // select the camera
  
  
  if(!cap.isOpened()){
    cout << "cameras unavailable";
    return -1;
  }
  
  width  = cap.get(CV_CAP_PROP_FRAME_WIDTH);
  height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);

  cout << "largura = " << width << endl;
  cout << "altura  = " << height << endl;

  int histw = nbins, histh = nbins/2;
  Mat histImgR(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgG(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgB(histh, histw, CV_8UC3, Scalar(0,0,0));
  
  while(1){   
    
    cap >> image;
    //Resize the capture
    resize(image, image, Size(640, 360));
    ///////////////////////////////////////////////////////////////////////////////////////////////////
    // EQUALIZATION
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    //Split the captured image into three channels, stored in "planes"
    split(image, planes);

    //Equalize each channel
    equalizeHist(planes[0], planes[0]);
    equalizeHist(planes[1], planes[1]);
    equalizeHist(planes[2], planes[2]);

    //Use merge() to recombine the equalized planes/channels into image.
    merge(planes, image);
    
    /////////////////////////////////////////////////////////////////////////////////////////////////// 
    
    
    calcHist(&planes[0], 1, 0, Mat(), histR, 1,
             &nbins, &histrange,
             uniform, acummulate);
    calcHist(&planes[1], 1, 0, Mat(), histG, 1,
             &nbins, &histrange,
             uniform, acummulate);
    calcHist(&planes[2], 1, 0, Mat(), histB, 1,
             &nbins, &histrange,
             uniform, acummulate);

    normalize(histR, histR, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
    normalize(histG, histG, 0, histImgG.rows, NORM_MINMAX, -1, Mat());
    normalize(histB, histB, 0, histImgB.rows, NORM_MINMAX, -1, Mat());

    histImgR.setTo(Scalar(0));
    histImgG.setTo(Scalar(0));
    histImgB.setTo(Scalar(0));
    
    for(int i=0; i<nbins; i++){
      line(histImgR,
           Point(i, histh),
           Point(i, histh-cvRound(histR.at<float>(i))),
           Scalar(0, 0, 255), 1, 8, 0);
      line(histImgG,
           Point(i, histh),
           Point(i, histh-cvRound(histG.at<float>(i))),
           Scalar(0, 255, 0), 1, 8, 0);
      line(histImgB,
           Point(i, histh),
           Point(i, histh-cvRound(histB.at<float>(i))),
           Scalar(255, 0, 0), 1, 8, 0);
    }
    histImgR.copyTo(image(Rect(0, 0       ,nbins, histh)));
    histImgG.copyTo(image(Rect(0, histh   ,nbins, histh)));
    histImgB.copyTo(image(Rect(0, 2*histh ,nbins, histh)));
    imshow("image", image);
    if(waitKey(30) >= 0) break;
  }
  return 0;
}
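
A usage note on Example #9: equalizing the B, G and R planes independently, as the loop above does, can shift the colors of the image because each channel is stretched on its own. A common alternative, sketched here under the same OpenCV 2.x constant names, is to equalize only the brightness channel in HSV space:

// Sketch: equalize brightness only, leaving hue and saturation untouched.
Mat hsv;
cvtColor(image, hsv, CV_BGR2HSV);
vector<Mat> ch;
split(hsv, ch);
equalizeHist(ch[2], ch[2]);        // V channel
merge(ch, hsv);
cvtColor(hsv, image, CV_HSV2BGR);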
Example #10
int main (int argc, char** argv) {

  // Mat img1 = imread(argv[1], 0);
  // Mat img2 = imread(argv[2], 0);
  // resize(img1, img1, Size(0.5*img1.cols, 0.5*img1.rows));
  // resize(img2, img2, Size(0.5*img2.cols, 0.5*img2.rows));
  // clearNearbyOpticalflow(&img1, 420);
  // clearNearbyOpticalflow(&img2, 420);
  // cout << "hello world" << endl;
  // Mat img1_disp, img2_disp;
  // getDisparity(img1, img2, img1_disp, img2_disp);
  // imshow ("img1", img1);
  // imshow("img1 disp", img1_disp);
  // imshow("img2 disp", img2_disp);
  // string im1_disp = argv[1];
  // im1_disp = im1_disp.substr(0, im1_disp.length()-4)+ "_disp.png";
  // imwrite(im1_disp, img1_disp);

  // waitKey(0);
  // return 0;

  help();
  ofstream ofs("log.txt");
  ofstream ofs_rt;
  ofstream ofs_x;
  ofstream ofs_y;
  ofstream ofs_z;

  if (ofs.bad()) {
    cerr << "file cannot open" << endl;
    exit(1);
  }
  // sequence directory
  bool save_flag = false;
  if (argc>=3) {
    save_flag = true;
  }
  VideoCapture cap;
  char* in_filepath = argv[1];
  char* bn = basename(in_filepath);

  cap.open(in_filepath);
  if (!cap.isOpened()) {
    cout << "Could not initialize capturing...\n";
    exit(1);
  }

  float scale = 1.0;
  if (argc >= 4) {
    scale = atof(argv[3]);
  }
  float pitch = -0.08;
  if (argc >= 5) {
    pitch = atof(argv[4]);
  }
  int start_frame = 1;
  if (argc >= 6) {
    start_frame = atoi(argv[5]);

  }
  string base_dir;
  if (save_flag) {
    base_dir = argv[2];
    if (base_dir[base_dir.length()-1]=='/')
      base_dir += bn;
    else
      base_dir = base_dir+'/'+bn;
    stringstream ss;
    ss << "_" << scale << "_" << pitch;
    base_dir = base_dir.substr(0, base_dir.length()-4)+ss.str();
    cout << base_dir << endl;

    string cmd = "mkdir -p " + base_dir;
    system(cmd.c_str());

    ofs_x.open(base_dir+"/"+"x.txt");
    ofs_y.open(base_dir+"/"+"y.txt");
    ofs_z.open(base_dir+"/"+"z.txt");
    ofs_rt.open(base_dir+"/"+"rt.txt");
    base_dir += "/frame_";
  }

  char numstr[256] = {0};
  int frame_index = 0;
  // set most important visual odometry parameters
  // for a full parameter list, look at: viso_mono.h
  VisualOdometryMono::parameters param;

  param.calib.f = 733.2/scale;
  param.calib.cu = 444.261*scale;
  param.calib.cv = 277.314*scale;
  param.height = 1.6;
  // param.pitch  = -0.08;

  param.pitch = pitch;
  param.bucket.max_features = 1000;
  int32_t first_frame = 0;
  int32_t last_frame = 8800;
  int32_t step = 3;
  // init visual odometry
  VisualOdometryMono viso(param);

  Reconstruction re3d;
  re3d.setCalibration(param.calib.f, param.calib.cu, param.calib.cv);

  // current pose (this matrix transforms a point from the current
  // frame's camera coordinates to the first frame's camera coordinates)
  Matrix pose = Matrix::eye(4);

  // loop through all frames
  // for (int32_t i=first_frame; i < last_frame; i+=step) {
  Mat gray_frame, optical_flow;
  Mat left_img, right_img;
  Mat left_img_disp8, right_img_disp8;

  const int k_factor = 23;
  int SKIPPED_FRAMES = 2;
  int velocity_world;
  cap.set(CV_CAP_PROP_POS_FRAMES, start_frame);
  // intrinsic param
  // Mat camera_matrix, dist_coeffs, remap_x, remap_y;

  // FileStorage fs_in;
  // fs_in.open("calibration.yml", FileStorage::READ);
  // fs_in["camera matrix"] >> camera_matrix;
  // fs_in["distorted coeffs"] >> dist_coeffs;
  // cout << camera_matrix << endl;
  // cout << dist_coeffs << endl;
  cout << scale << "\t" << pitch << "\t" << start_frame << endl;

  Mat r1, r2, t1, t2;
  Mat prev_r(3, 1, CV_64FC1);
  Mat prev_prev_r(3, 1, CV_64FC1);
  Mat prev_t(3, 1, CV_64FC1);
  Mat prev_prev_t(3, 1, CV_64FC1);
  double kkk = 0.7;
  int counter = 0;
  Mat frame;
  for ( ;; ) {
    cap >> frame;
    if (frame.empty())
      return 0;
    char* speed = NULL;
    velocity_world = detectSpeed(frame, &speed) / 3.6f; // m/s
    if (velocity_world>0) {
      SKIPPED_FRAMES = k_factor/velocity_world;
      if (SKIPPED_FRAMES < 2)
        SKIPPED_FRAMES = 2;
      else if (SKIPPED_FRAMES > 15)
        SKIPPED_FRAMES = 15;
    }
    else
      SKIPPED_FRAMES = 15;

    for (int i = 1; i < SKIPPED_FRAMES; i++) {
      cap >> frame;
      if (frame.empty())
        return 0;
    }

    cout << "speed: " << velocity_world << endl;
    unsigned int curr_frame = (unsigned int)cap.get(CV_CAP_PROP_POS_FRAMES);
    // status
    cout << "Processing: Frame: " << curr_frame << endl;// cap.get(CV_CAP_PROP_POS_FRAMES) << endl;

    // catch image read/write errors here
    resize(frame, frame, Size(0.5*frame.cols, 0.5*frame.rows));
    // undistorted image

    clearNearbyOpticalflow(&frame, 420);
    cvtColor(frame, gray_frame, COLOR_BGR2GRAY);
    int width = gray_frame.cols;
    int height = gray_frame.rows;
    // convert input images to uint8_t buffer
    uint8_t* gray_frame_data = (uint8_t*)malloc(width*height*sizeof(uint8_t));
    int32_t k=0;
    for (int32_t v=0; v<height; v++) {
      for (int32_t u=0; u<width; u++) {
        gray_frame_data[k++] = gray_frame.at<uchar>(v, u);
      }
    }

    vector<Matcher::p_match> p_matched = viso.getMatches();
    // compute visual odometry
    int32_t dims[] = {width,height,width};
    if (viso.process(gray_frame_data,dims)) {
      // on success, update current pose
      // Matrix pose_tmp = Matrix::eye(4);
      // pose_tmp = pose * Matrix::inv(viso.getMotion());
      // float fl_pre[16] = {0};
      // float fl_cur[16] = {0};
      // for (int i = 0; i < 4; i ++ )
      //   for (int j = 0; j < 4; j ++) {
      //     fl_pre[i*4+j] = pose.val[i][j];
      //     fl_cur[i*4+j] = pose_tmp.val[i][j];
      //   }
      // Mat Rt1 = Mat(4, 4, CV_32FC1, &fl_pre);
      // Mat Rt2 = Mat(4, 4, CV_32FC1, &fl_cur);
      // Mat R1 = Mat(Rt1, Range(0, 3), Range(0, 3));
      // Mat R2 = Mat(Rt2, Range(0, 3), Range(0, 3));
      // Mat Rx1, Rxx1, Rx2, Rxx2;
      // Rodrigues(R1, Rx1);
      // Rodrigues(R2, Rx2);
      // if (fabs(Rx1.at<float>(0, 0) - Rx2.at<float>(0, 0)) > 0.02) {
      //   Rx2.at<float>(0, 0) = param.pitch;
      // }
      // Rodrigues(Rx2, Rxx2);
      // double dl[16] = {0};
      // for (int i=0; i<3; i ++) {
      //   for (int j=0; j<3; j ++) {
      //     dl[i*4+j] = Rxx2.at<float>(i, j);
      //   }
      // }
      // dl[3] = fl_cur[3];      dl[7] = fl_cur[7];
      // dl[11] = fl_cur[11];      dl[15] = 1;
      // dl[12] = dl[13] = dl[14] = 0;
      // Matrix refine_RT = Matrix(4, 4, dl);
      // pose = refine_RT;
      counter ++;
      if (counter > 3) {
        Mat r_predict(3, 1, CV_64FC1);
        r_predict.at<double>(0, 0) = prev_r.at<double>(0, 0)*2-prev_prev_r.at<double>(0, 0);
        r_predict.at<double>(1, 0) = prev_r.at<double>(1, 0)*2-prev_prev_r.at<double>(1, 0);
        r_predict.at<double>(2, 0) = prev_r.at<double>(2, 0)*2-prev_prev_r.at<double>(2, 0);
        cout << "r_predict: \n" << r_predict << endl;

        Matrix pose_calc = pose * Matrix::inv(viso.getMotion());
        double dl[16] = {0};
        for (int i=0; i<4; ++ i)
          for (int j=0; j<4; ++ j) {
            dl[4*i+j] = pose_calc.val[i][j];
          }
        Mat pose_calc_mat(4, 4, CV_64FC1, dl);
        cout << "pose: \n" << pose << endl;
        cout << "pose_calc_mat: \n" << pose_calc_mat << endl;

        Mat r_calc = Mat(pose_calc_mat, Range(0, 3), Range(0, 3));
        Mat r_degree(3, 1, CV_64FC1);
        Rodrigues(r_calc, r_degree);
        cout << "r_calc: \n" << r_degree << endl;
        r_degree.at<double>(0, 0) = kkk*r_degree.at<double>(0, 0) + (1.0-kkk)*r_predict.at<double>(0, 0);
        r_degree.at<double>(1, 0) = kkk*r_degree.at<double>(1, 0) + (1-kkk)*r_predict.at<double>(1, 0);
        r_degree.at<double>(2, 0) = kkk*r_degree.at<double>(2, 0) + (1.0-kkk)*r_predict.at<double>(2, 0);
        cout << "r update: \n" << r_degree << endl;
        cout << "pose: \n" << pose << endl;
        cout << "pose_calc: \n" << pose_calc << endl;
        Rodrigues(r_degree, r_calc);

        // Mat t_predict(3, 1, CV_64FC1);
        Mat t_calc = Mat(pose_calc_mat, Range(0, 3), Range(3, 4));
        // cout << "prev_t: \n" << prev_t << "\nprev_prev_t: \n" << prev_prev_t << endl;
        // t_predict.at<double>(0, 0) = prev_t.at<double>(0, 0)*2-prev_prev_t.at<double>(0, 0);
        // t_predict.at<double>(1, 0) = prev_t.at<double>(1, 0)*2-prev_prev_t.at<double>(1, 0);
        // t_predict.at<double>(2, 0) = prev_t.at<double>(2, 0)*2-prev_prev_t.at<double>(2, 0);
        // cout << "t_predict: \n" << t_predict << endl;

        // cout << "t_calc: \n" << t_calc << endl;
        // t_calc.at<double>(0, 0) = kkk*t_calc.at<double>(0, 0) + (1-kkk)*t_predict.at<double>(0, 0);
        // t_calc.at<double>(1, 0) = kkk*t_calc.at<double>(1, 0) + (1-kkk)*t_predict.at<double>(1, 0);
        // t_calc.at<double>(2, 0) = kkk*t_calc.at<double>(2, 0) + (1-kkk)*t_predict.at<double>(2, 0);
        // cout << "t update: \n" << t_calc << endl;

        for (int i = 0; i < 3; i ++ ) {
          for (int j = 0; j < 3; j ++) {
            pose.val[i][j] = r_calc.at<double>(i, j);
          }
          pose.val[i][3] = t_calc.at<double>(i, 0);
        }
        cout << "pose update: \n" << pose << endl;
        // cout << "r_calc: \n" << r_calc << endl;
        // cout << "t_calc: \n" << t_calc << endl;
        // cout << "r_degree: \n" << r_degree << endl;

        prev_prev_r = prev_r;
        // prev_prev_t = prev_t;
        prev_r = r_degree;
        // prev_t = t_calc;
      } else {
        pose = pose*Matrix::inv(viso.getMotion());
        prev_prev_r = prev_r;
        prev_prev_t = prev_t;
        double dl[16] = {0};
        for (int i=0; i<4; ++ i)
          for (int j=0; j<4; ++ j) {
            dl[4*i+j] = pose.val[i][j];
          }
        Mat tmp(4, 4, CV_64FC1, dl);
        Mat tmp_r(tmp, Range(0, 3), Range(0, 3));
        // Mat tmp_t(tmp, Range(0, 3), Range(3, 4));
        Rodrigues(tmp_r, prev_r);
        // prev_t = tmp_t;
      }

      //  right_img = gray_frame;
      right_img = gray_frame.clone();
      if (!left_img.empty()) {
        getDisparity(left_img, right_img, left_img_disp8, right_img_disp8);
        imshow("left disp8", left_img_disp8);
        getDisparity(right_img, left_img, left_img_disp8, right_img_disp8);
        imshow("right disp8", right_img_disp8);
      }
      // left_img = right_img;
      cv::swap(left_img, right_img);

      // output some statistics
      double num_matches = viso.getNumberOfMatches();
      double num_inliers = viso.getNumberOfInliers();
      // reconstruction from 3d
      re3d.update(p_matched, viso.getMotion(), 1, 2, 30, 3);
      vector<Reconstruction::point3d> p = re3d.getPoints();
      if (save_flag) {
        sprintf(numstr, "%d", frame_index ++ );
        string filename = base_dir + numstr + ".pcd";
        save3dPointsAsPCD(p, filename);
      }
      // struct triangulateio in, out;
      // cout << ", Matches: " << num_matches << endl;
      // cout << ", Inliers: " << 100.0*num_inliers/num_matches << " %" << endl;
      // cout << "p_matched.size(): " << p_matched.size() << endl;
      // cout << "p.size(): " << p.size() << endl;
      // cout << pose << endl << endl;

      // ofs << ", Matches: " << num_matches;
      // ofs << ", Inliers: " << 100.0*num_inliers/num_matches << " %" << ", Current pose: " << endl;
      ofs_rt << pose << endl << endl;
      ofs_x << pose.val[0][3] << endl;
      ofs_y << pose.val[1][3] << endl;
      ofs_z << pose.val[2][3] << endl;

    } else {
      cout << " ... failed!" << endl;
      // ofs << " ... failed!" << endl;
    }
    imshow("frame", gray_frame);
    optical_flow = Mat::zeros(gray_frame.rows, gray_frame.cols, CV_8UC3);
    drawMatched(p_matched, optical_flow);
    imshow("matched", optical_flow);
    // prev_image
    // left_img = gray_frame;
    // release uint8_t buffers
    free(gray_frame_data);
    int key = waitKey(10);
    if (key == 27 || key == 0x20)
      break;

    cout << "-----------------------------" << endl;
  }
  // output
  cout << "Demo complete! Exiting ..." << endl;

  // exit

  return 0;
}
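
The rotation handling in Example #10 is a constant-velocity predictor blended with the measured Rodrigues vector (weight kkk = 0.7 on the measurement). Isolated from the demo, the update is just this sketch:

// Sketch: blend a measured rotation vector with a linear extrapolation of
// the two previous estimates. All Mats are 3x1 CV_64FC1 Rodrigues vectors.
Mat smoothRotation(const Mat& r_meas, const Mat& prev_r,
                   const Mat& prev_prev_r, double kkk)
{
    Mat r_predict = 2 * prev_r - prev_prev_r;        // constant-velocity prediction
    return kkk * r_meas + (1.0 - kkk) * r_predict;   // exponential blend
}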
Example #11
int main()
{
	
	VideoCapture capture = VideoCapture(CV_CAP_OPENNI);
	Size size = Size(capture.get(CV_CAP_PROP_FRAME_WIDTH),capture.get(CV_CAP_PROP_FRAME_HEIGHT));
	int codec = CV_FOURCC('D', 'I', 'V', 'X');    
	VideoWriter writer("video.avi",codec,capture.get(CV_CAP_PROP_FPS),size,0);

	namedWindow( "COLOR", CV_WINDOW_AUTOSIZE );
	namedWindow( "wireframe", CV_WINDOW_AUTOSIZE );
	namedWindow( "FILT", CV_WINDOW_AUTOSIZE );
	namedWindow( "BlobCenters", CV_WINDOW_AUTOSIZE );

	moveWindow("COLOR", 10, 10);
	moveWindow("wireframe", 710, 10);
	moveWindow("FILT", 10, 540);
	moveWindow("BlobCenters", 710, 540);
	
    if(writer.isOpened())
    {

    	Mat depthMap;
		Mat bgrImage ;
		Mat filtered;
		Mat filtered2;
		Point centerOfHand;

		//Motion History Mats
		Mat blobCenters = Mat::zeros(size,CV_8U);
		imshow("BlobCenters",blobCenters);
		int prevX = -1, prevY = -1;

		vector<Point> scatter;

					vector<Point> scatter1;
					scatter1.push_back(Point(200,300));
					scatter1.push_back(Point(210,310));
					scatter1.push_back(Point(220,320));
					scatter1.push_back(Point(230,330));
					scatter1.push_back(Point(240,340));


		bool foundHand = false;
		clock_t gestureTimer;
		seconds_count=0;

		int X_Displacement=0;
		int Y_Displacement=0;

        while ( 1 )
		{
			capture.grab();
			capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP );
			capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE );
		
			//imshow("depthmap",depthMap);
			//Find the minimum value greater than 0 in the matrix
			//TEST SECTION

			flip(depthMap,depthMap,1);
			flip(bgrImage,bgrImage,1);

			MatConstIterator_<unsigned short> it = depthMap.begin<unsigned short>(), it_end = depthMap.end<unsigned short>();
			unsigned short minVal=60000;
			
			for(;it != it_end; ++it){
				if(*it<minVal && *it>0){
					minVal=*it;
				}
			}			

			//cout << "minVal: " <<minVal<<endl;
			unsigned short minRange = (minVal > 30) ? (unsigned short)(minVal - 30) : 0; // guard against unsigned underflow
			unsigned short maxRange = minVal + 60;


			//cout << "min,max: "<<minRange<<", "<<maxRange<<endl;

			//Perhaps just create another mat with size 8u. This seems to be what happens when

			Mat thtwBitDepth;// = cvCreateImage(size,IPL_DEPTH_32F,0);

			depthMap.convertTo(thtwBitDepth,CV_32F);//,1.0/256,0);

			
			//imshow("32 Bit",thtwBitDepth);

			filtered2 = thresholdDistance(thtwBitDepth,minRange,maxRange);
			filtered2 = thresholdDistance(filtered2,25,900);
			

			//imshow("ThresholdDistance",filtered2);

			//END TEST SECTION

			//inRange(depthMap,25,800,filtered);
			//filtered2 = filtered.clone();
			filtered2 = smoothImage(filtered2);
			imshow("FILT",filtered2);


			Mat thtwsfiltered;// = cvCreateImage(size,IPL_DEPTH_8U,0);
			filtered2.convertTo(thtwsfiltered,CV_8U);
			filtered2 = thtwsfiltered.clone();
			
			if(!foundHand){
				foundHand = findHand(thtwsfiltered, bgrImage, centerOfHand);
				//foundHand = findHand(filtered2, bgrImage);
				if(foundHand) gestureTimer=clock();
				//cout << "found hand = "<< foundHand << endl;
			} else {
				
				//THIS IS THE DEPTH AT THE POINT WHERE THE CENTER OF THE HAND WAS CALCULATED
				//cout  << "depth: " << depthMap.at<unsigned short>(centerOfHand) << endl;

				//A hand was detected and now a gesture is being analyzed.

				//Find center of mass of all blobs in window and draw a circle on them.
				//This image will be fed to the motion history functions.

				std::vector<std::vector<Point> > contours;

				findContours(filtered2,contours,CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            	for (int i = 0; i < contours.size(); i++) {

                	vector<Point> contour = contours[i];
                	Mat contourMat = Mat(contour);
                	double cArea = contourArea(contourMat);

                	if(cArea > 4000 && cArea < 30000) // likely the hand
                	{
                    	Scalar center = mean(contourMat);
                    	Point centerPoint = Point(center.val[0], center.val[1]);
						//Point centerPoint = Point(50, 50);
						
						if(prevX>=0 && prevY>=0 && center.val[0]>=0 && center.val[1]>=0){
							line(blobCenters,centerPoint,Point(prevX,prevY),Scalar(255,255,255),30);
							line(bgrImage,centerPoint,Point(prevX,prevY),Scalar(0,255,0),15);
							X_Displacement += (center.val[0]-prevX);
							Y_Displacement += (center.val[1]-prevY);
						}

						prevX = center.val[0];
						prevY = center.val[1];
						scatter.push_back(centerPoint);
						//cout<<scatter<<endl;
						//cout <<"Displacement(x,y): "<<X_Displacement<<","<<Y_Displacement<<endl;
						//circle(bgrImage, centerPoint, 8, Scalar(255,0,0), -1);
					}
				}

				for (size_t i = 0; i < scatter.size(); i++){
					circle(bgrImage, scatter[i], 8, Scalar(255,0,0), -1);
				}
/*
				for (int i = 0;i<scatter1.size();i++){
					circle(bgrImage, scatter1[i], 8, Scalar(255,0,0), -1);
				}
*/
				//circle(bgrImage, scatter[0], 8, Scalar(255,0,0), -1);
				//circle(bgrImage, scatter[scatter.size()-1], 8, Scalar(255,0,0), -1);


				if(X_Displacement>160 || X_Displacement<-160 || Y_Displacement>120 || Y_Displacement<-120){
					

					//Call the linearRegression function to get the slope of the line of best fit and the correlation coefficient.
					double corrCoef=0;


					//double slope = linearRegression(scatter1, &corrCoef);
					double slope = linearRegression(scatter, &corrCoef);
					//cout<<scatter<<endl;
					//cout<<"slope: "<<slope<<"  Corr"<<abs(corrCoef)<<endl;
					if(!scatter.empty()){
						int first_x = scatter[0].x;
						int first_y = scatter[0].y;
						int last_x = scatter[scatter.size()-1].x;
						int last_y = slope*(last_x - first_x) + first_y;
						//cout<<scatter[0]<<endl;
						//cout<<last_x<<" "<<last_y<<endl;
						line(bgrImage,Point(first_x,first_y),Point(last_x,last_y),Scalar(0,255,255),10);
					}
/*
					if(!scatter1.empty()){
						int first_x = scatter1[0].x;
						int first_y = scatter1[0].y;
						int last_x = scatter1[scatter1.size()-1].x;
						int last_y = slope*(last_x - first_x) + first_y;
						//cout<<scatter1[0]<<endl;
						//cout<<last_x<<" "<<last_y<<endl;
						line(bgrImage,Point(first_x,first_y),Point(last_x,last_y),Scalar(0,255,255),10);
					}
*/
					if(fabs(corrCoef)>.1 && slope > -.5 && slope < .5){ // fabs: plain abs() can pick the int overload and truncate
						//This is a horizontal line. Match it with a horizontal swipe.
						if(X_Displacement>160){
							//Right swipe
							cout<<"next"<<endl;
						}
						if(X_Displacement<-160){
							//Left swipe
							cout<<"prev"<<endl;
						}
					}

					if(fabs(corrCoef)>.1 && (slope > 2 || slope < -2) ){
						//This is a vertical line. Match it with a vertical swipe.
						if(Y_Displacement>120){
							//Down swipe
							cout<<"voldown 3"<<endl;
						}
						if(Y_Displacement<-120){
							//Up swipe
							cout<<"volup 3"<<endl;
						}
					}

					imshow("temp",bgrImage);

					scatter.clear();
					gestureTimer=0;
					foundHand=0;
					prevX=-1;
					prevY=-1;
					X_Displacement=0;
					Y_Displacement=0;
					blobCenters = Mat::zeros(size,CV_8U);
				}

				//cout << "Clock: "<<(clock()-gestureTimer)/CLOCKS_PER_SEC<<endl;
				if(((clock()-gestureTimer)/CLOCKS_PER_SEC)>=1){
					//Gesture time has exceeded 1 second. Give up on finding a gesture.
					scatter.clear();
					gestureTimer=0;
					foundHand=0;
					prevX=-1;
					prevY=-1;
					X_Displacement=0;
					Y_Displacement=0;
					blobCenters = Mat::zeros(size,CV_8U);
				}

				imshow("BlobCenters",blobCenters);

				//NOTE: Need to add a check to determine if a gesture was determined correctly.
				//      If it was gestureTimer and foundHand both need to be set to 0.

			}

			/*WRITE TO FILE*/
			//writer.write(filtered);

			/*DISPLAY IMAGES*/
			imshow("COLOR",bgrImage);
			
			//imshow("FILT",thtwsfiltered);

            if(waitKey(100)>=0)
			{
				break;
			}
        }
	}
    else
    {
       	cout<<"ERROR while opening"<<endl;
    }
	writer.release();
	capture.release();

	
	
    return 0;
}
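The gesture example above calls a linearRegression(points, &corrCoef) helper that is not part of the snippet. A minimal least-squares sketch that matches the call site (slope returned, Pearson correlation written through the pointer) follows; it is an assumption about the missing helper, not the author's implementation, and relies on <cmath> plus the usual using namespace std/cv.

// Hypothetical stand-in for the linearRegression() helper used above.
double linearRegression(const vector<Point>& pts, double* corrCoef)
{
    double n = (double)pts.size();
    double sx = 0, sy = 0, sxx = 0, syy = 0, sxy = 0;
    for (size_t i = 0; i < pts.size(); i++) {
        sx  += pts[i].x;
        sy  += pts[i].y;
        sxx += (double)pts[i].x * pts[i].x;
        syy += (double)pts[i].y * pts[i].y;
        sxy += (double)pts[i].x * pts[i].y;
    }
    double cov  = n * sxy - sx * sy;   // n^2 * covariance(x, y)
    double varX = n * sxx - sx * sx;   // n^2 * variance(x)
    double varY = n * syy - sy * sy;   // n^2 * variance(y)
    double slope = (varX != 0) ? cov / varX : 0;  // near-vertical strokes give varX ~ 0
    if (corrCoef)
        *corrCoef = (varX > 0 && varY > 0) ? cov / sqrt(varX * varY) : 0;
    return slope;
}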
Example #12
int main(int argc, char* argv[])
{
    VideoCapture cap;
    VideoWriter output;
    string inFile = "88_7_orig.mov";
    
    int ver = 2;
    int hor = 2;
    int maxFrames;
    bool quietMode = false;
    bool reportMode = false;
    bool displayMode = false;
    char *numWorkers = NULL;
    struct timeval startTime, endTime;

    if(argc > 1)
    {
        for(int i = 1; i < argc; ++i)
        {
            if(strcmp(argv[i], "-f") == 0)
            {
                inFile = argv[++i];
            }
            else if(strcmp(argv[i], "-h") == 0)
            {
                hor = atoi(argv[++i]);
            }
            else if(strcmp(argv[i], "-w") == 0)
            {
                numWorkers = argv[++i];
            }
            else if(strcmp(argv[i], "-v") == 0)
            {
                ver = atoi(argv[++i]);
            }
            else if(strcmp(argv[i], "-q") == 0)
            {
                quietMode = true;
            }
            else if(strcmp(argv[i], "-r") == 0)
            {
                reportMode = true;
            }
            else if(strcmp(argv[i], "-d") == 0)
            {
                displayMode = true;
            }
            else
            {
                cerr << "Unknown flag: " << argv[i] << endl;
                printUsage();
            }
        }
    }
    else
    {
        printUsage();
        return -1;
    }

    if(numWorkers == NULL)
        numWorkers = (char *)"2";

    if (__cilkrts_set_param("nworkers", numWorkers) != 0)
    {
        printf("Failed to set worker count\n");
        return 1;
    }

    cap.open(inFile);
    if(!cap.isOpened())
    {
        cerr << "Unable to open input file." << endl;
        return -1;
    }
    maxFrames = cap.get(CV_CAP_PROP_FRAME_COUNT);
    int origWid = cap.get(CV_CAP_PROP_FRAME_WIDTH);
    int origHei = cap.get(CV_CAP_PROP_FRAME_HEIGHT);

    int ex = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    Size S = Size((int)cap.get(CV_CAP_PROP_FRAME_WIDTH) -ver , (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT)-hor);
    //char key = 0;
    int first = 1, second = 1, third = 1;
    int last = 0;

    string::size_type pAt = inFile.find_last_of('.');   // Find extension point
    const string outFile = inFile.substr(0, pAt) + "-temp4.mov";
    output.open(outFile, ex, cap.get(CV_CAP_PROP_FPS), S, true);

    Mat *frames = new Mat[maxFrames];
    Mat *outFrames = new Mat[maxFrames];

    for(int i = 0; i < maxFrames; ++i)
    {
        cap >> frames[i];
        if(frames[i].empty())
        {
            cout << "Error: unable to read frame " << i << endl;
            return 1;
        }
    }

    if(quietMode == false)
        cout << "Processing " << maxFrames << " frames..." << endl;

    //clock_t startTime = clock();
    gettimeofday(&startTime, NULL);

    // This is the main loop which computes the retargeted frames
    cilk_for(int i = 0; i < maxFrames; ++i)
    {
        Mat frame1, frame2;
        if(quietMode == false)
            cout << "Frame " << (i + 1) << "/" << maxFrames << endl; // a shared frameCount++ here would race across cilk_for workers

        frame1 = frames[i];
        if(i < maxFrames - 1)
            frame2 = frames[i+1];
        else
            frame2 = frame1;

        outFrames[i] = ReduceFrame(frame1, frame2, ver, hor);
    }

    //clock_t endTime = clock();
    gettimeofday(&endTime, NULL);

    for(int i = 0; i < maxFrames; ++i)
    {
        output<<outFrames[i];
    }

    if(reportMode == true)
    {
        cout << "Input file: " << inFile << "\tOutput file: " << outFile << endl;
        cout << "Dimension: " << origWid << "x" << origHei << "\tFrames: " << maxFrames << endl;
        cout << "Seams carved: " << ver << "x" << hor << endl;
        //cout << "Elapsed time: " << (endTime - startTime)/CLOCKS_PER_SEC << endl;
        cout << "Elapsed time: " << (endTime.tv_sec*1000000 + (endTime.tv_usec)) - 
            (startTime.tv_sec*1000000 + (startTime.tv_usec)) << endl;        
    }

    delete[] frames;
    delete[] outFrames;

    return 0;
}
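The seam-carving drivers in this collection bail out through a printUsage() that is not included in the snippets. A hypothetical version covering the flags parsed in the argument loop above:

// Hypothetical usage printer for the flags handled above; the original
// function is not part of the snippet.
void printUsage()
{
    cout << "Usage: program -f <input.mov> [options]\n"
         << "  -f <file>  input video file\n"
         << "  -v <n>     vertical seams to carve (default 2)\n"
         << "  -h <n>     horizontal seams to carve (default 2)\n"
         << "  -w <n>     Cilk worker count (parallel version only)\n"
         << "  -q         quiet mode\n"
         << "  -r         print a report when done\n"
         << "  -d         display frames while processing\n";
}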
Example #13
File: video.hpp  Project: verzhak/stend
		inline unsigned frame_num() { return video.get(CV_CAP_PROP_FRAME_COUNT); };
Example #14
File: video.hpp  Project: verzhak/stend
		inline Size size() { return Size(video.get(CV_CAP_PROP_FRAME_WIDTH), video.get(CV_CAP_PROP_FRAME_HEIGHT)); };
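Examples #13 and #14 are inline accessors lifted from a class in verzhak/stend that wraps a cv::VideoCapture member named video. A minimal surrounding class, sketched only for context (the class name and open() method are assumptions):

// Assumed shape of the wrapper the two accessors above belong to.
class video_wrapper
{
	VideoCapture video;  // the member both accessors read

public:
	bool open(const string& fname) { return video.open(fname); }
	inline unsigned frame_num() { return video.get(CV_CAP_PROP_FRAME_COUNT); };
	inline Size size() { return Size(video.get(CV_CAP_PROP_FRAME_WIDTH), video.get(CV_CAP_PROP_FRAME_HEIGHT)); };
};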
Example #15
void fichierControl::openVideo(QString &fileName, QGraphicsView *videoGraphicsview, QProgressBar *progressBar)
{
    bool stop = false;
    if (!cap.open(fileName.toStdString())){
          cout << "Cannot open the video file" << endl;
          return;
    }
    //get the total number of frames
    long totalFrameNumber = cap.get(CV_CAP_PROP_FRAME_COUNT);
    cout<<"Number of frames: "<<totalFrameNumber<<endl;

    //start the video at frame 300
    long frameToStart = 300;
    cap.set( CV_CAP_PROP_POS_FRAMES,frameToStart);
    cout<<"Frame to start"<<frameToStart<<endl;

    //stop the video at frame 400
    int frameToStop = 400;

    if(frameToStop < frameToStart)
    {
        cout<<"Frametostop smaller than frametostart!"<<endl;
    }
    else
    {
        cout<<"Frame to stop"<<frameToStop<<endl;
    }

    //get the frames per second of the video
    double rate = cap.get(CV_CAP_PROP_FPS);
    cout<<"Frames per second: "<<rate<<endl;

    int delay = rate > 0 ? (int)(1000/rate) : 33; // guard: CV_CAP_PROP_FPS can be 0 for some containers
    currentFrame = frameToStart;

    //set the minimum and maximum value of progressBar
    progressBar->setMinimum(frameToStart);
    progressBar->setMaximum(frameToStop);
    //namedWindow("MyVideo",WINDOW_NORMAL); //create a window called "MyVideo"
    //resizeWindow("MyVideo", 400, 300);
    //Create Trackbar
    /*if(totalFrameNumber != 0){
         createTrackbar("Position", "MyVideo", &currentFrame, totalFrameNumber, tbCallback, &frame);
    }*/
    while(!stop)
    {
         bool bSuccess = cap.read(frame); // read a new frame from video
         if (!bSuccess) //if reading fails, leave the loop
         {
                cout << "Cannot read the frame from video file" << endl;
                break;
         }
         /*******/
          if(frame.data){
              cvtColor(frame, frame, CV_BGR2RGB);  //Qt expects RGB; OpenCV stores BGR
          }else{
              cout << "Frame no data" << endl;
          }
          QImage image = QImage((uchar*)(frame.data), frame.cols, frame.rows, frame.step, QImage::Format_RGB888);
          QImage result = image.scaled(800,600).scaled(495,325, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
          QGraphicsScene *scene = new QGraphicsScene; //NOTE: leaks one scene per frame; see the sketch after this example
          scene->addPixmap(QPixmap::fromImage(result));
          videoGraphicsview->setScene(scene);
          videoGraphicsview->show();
          cout<<"currentFrame"<<currentFrame<<endl;
         /*******/
         //imshow("MyVideo", frame); //show the frame in "MyVideo" window
         if(waitKey(delay) == 27 || currentFrame >= frameToStop) //wait 'delay' ms; stop on 'esc' or once frameToStop is reached
         {
               cout << "esc key is pressed by user" << endl;
               stop = true;
         }
         //pause: any other key halts playback until the next key press
        if( waitKey(delay) >= 0)
        {
            waitKey(0);
        }
        currentFrame++;
        progressBar->setValue(currentFrame);
         //setTrackbarPos("Position", "MyVideo",currentFrame);
     }
    //Close video file
    cap.release();
    waitKey(0);
}
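openVideo() above allocates a new QGraphicsScene on every frame and never frees it, so memory grows for as long as the video plays. A sketch of a leak-free variant, assuming the scene can be created once up front:

// Before the loop: create the scene once, parented to the view so Qt
// deletes it together with the view.
QGraphicsScene *scene = new QGraphicsScene(videoGraphicsview);
videoGraphicsview->setScene(scene);

// Inside the loop: replace the previous pixmap instead of leaking a scene.
scene->clear();
scene->addPixmap(QPixmap::fromImage(result));
videoGraphicsview->show();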
Example #16
int main(int _nargs, char** _vargs)
{
	if (_nargs < 2)
	{
		cout << "Not enough arguments.\nUsage:\n\tT2 <input_file>\n";
		return EXIT_FAILURE;
	}

	Config::load("../config/config");

	DescType descriptorType = Config::getDescriptorType();
	MetricType metricType = Config::getMetricType();
	int param = Config::getDescriptorParam();

	// Get input file name
	string inputFile = _vargs[1];

	// Get descriptors for each query video
	vector<string> queryLocations = Helper::getQueryLocations(inputFile);
	map<string, vector<DescriptorPtr>> queryDescriptors;
	getQueryDescriptors(queryDescriptors, queryLocations, descriptorType, Config::getQuerySkippedFrames(), param);

	string targetLocation = Helper::getTargetLocation(inputFile);
	cout << "Target video: " << targetLocation << "\n";

	VideoCapture capture;
	capture.open(targetLocation);
	if (!capture.isOpened())
	{
		cout << "ERROR: Can't open target video " << targetLocation << endl;
		return EXIT_FAILURE;
	}
	else
	{
		int skipFrames = Helper::getSkipFrames(Config::getTargetFrameRate(), capture);
		double totalFrames = capture.get(CV_CAP_PROP_FRAME_COUNT);
		vector<MatchArrayPtr> matches;
		matches.reserve((int) (totalFrames / skipFrames) + 1);

		cout << "Total frames in target: " << totalFrames << "\n";

		// Distance limits to be used later
		double minDistance = numeric_limits<double>::max();
		vector<double> minDistances;
		minDistances.reserve(81000 / skipFrames + 1);

		// Process the target video searching for each query
		Mat frame, grayFrame;
		int k = 0, t = 0;
		high_resolution_clock::time_point t1 = high_resolution_clock::now();
		for (int j = 0; j < totalFrames; j++)
		{
			if (!capture.grab() || !capture.retrieve(frame))
			{
				cout << "ERROR: Can't get frame from target video. Finishing.\n";
				break;
			}

			if (k == skipFrames)
			{
				cvtColor(frame, grayFrame, COLOR_BGR2GRAY);
				DescriptorPtr currentFrameDescriptor(new Descriptor(grayFrame, j, descriptorType, param));

				// Generate object to store matches for this frame
				matches.push_back(MatchArrayPtr(new MatchArray(j)));

				// Search the current frame in each query video
				for (pair<string, vector<DescriptorPtr>> entry : queryDescriptors)
				{
					vector<Match> queryMatch;
					Helper::findNearestFrames(currentFrameDescriptor, entry.second, metricType, queryMatch);
					matches.back()->addMatch(entry.first, queryMatch);
				}

				k = 0;
				double lastFrameMin = matches.back()->getMinDistance();
				minDistances.push_back(lastFrameMin);
				minDistance = minDistance > lastFrameMin ? lastFrameMin : minDistance;
			}
			k++;

			if (t == 10000)
			{
				high_resolution_clock::time_point t2 = high_resolution_clock::now();
				auto duration = chrono::duration_cast<chrono::microseconds>(t2 - t1).count();
				cout << "Processing frame " << j << "/" << totalFrames << " - Elapsed time: " << duration / 1E6 << " seg\n";
				t = 0;
			}
			t++;
		}

		sort(minDistances.begin(), minDistances.end());
		double medianDistance = Helper::getMedian(minDistances);
		Scalar meanDistance, stdDev;
		meanStdDev(minDistances, meanDistance, stdDev);

		double thresholdDistance = fmax(minDistance * 2, fmin(medianDistance, meanDistance[0] - stdDev[0]));

		cout << "Extracted " << matches.size() << " from target video.\n";

		// Extract the appearances of each query video
		double fps = capture.get(CV_CAP_PROP_FPS);
		map<string, vector<Appearance>> appearances = extractQueryAppearanceTimes(matches, fps, thresholdDistance, Config::getMinVideoLength());

		// Total elapsed time
		high_resolution_clock::time_point t2 = high_resolution_clock::now();
		auto duration = chrono::duration_cast<chrono::microseconds>(t2 - t1).count();

		// Print appearances
		FILE *resFile;
		string resFileName = "../results/DESCTYPE_";
		resFileName += Descriptor::ToString(descriptorType);
		resFileName += "-PARAM_";
		resFileName += to_string(param);
		resFileName += "-METRIC_";
		resFileName += Metric::ToString(metricType);
		resFile = fopen(resFileName.c_str(), "w");
		if (resFile == NULL)
		{
			cout << "ERROR: Can't open results file " << resFileName << endl;
			return EXIT_FAILURE;
		}

		fprintf(resFile, "#ELLAPSED TIME: %.2f\n", duration / 1E6);

		for (pair<string, vector<Appearance>> entry : appearances)
		{
			for (Appearance ap : entry.second)
			{
				printf("Query: %-50s --start-time=%.2f --run-time=%.2f\n", entry.first.c_str(), ap.startTime, ap.length);
				fprintf(resFile, "Query: %-50s --start-time=%.2f --run-time=%.2f\n", entry.first.c_str(), ap.startTime, ap.length);
			}
		}
		fclose(resFile);
	}

	return EXIT_SUCCESS;
}
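Helper::getMedian is not shown in Example #16. Since the caller sorts minDistances first, a plausible sketch (an assumption about the project's helper, not its actual code) is the standard midpoint rule:

// Hypothetical median of an already-sorted vector, matching the call above.
double getMedian(const vector<double>& sorted)
{
	if (sorted.empty())
		return 0.0;
	size_t n = sorted.size();
	return (n % 2 == 1) ? sorted[n / 2]
	                    : 0.5 * (sorted[n / 2 - 1] + sorted[n / 2]);
}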
Example #17
int main(int argc, char** argv){
  Mat image;
  int width, height;
  VideoCapture cap;
  vector<Mat> planes;
  Mat histR, histG, histB;
  int nbins = 64;
  float range[] = {0, 256};
  const float *histrange = { range };
  bool uniform = true;
  bool accumulate = false;

  cap.open(0);
  
  if(!cap.isOpened()){
    cout << "cameras indisponiveis";
    return -1;
  }
  
  width  = cap.get(CV_CAP_PROP_FRAME_WIDTH);
  height = cap.get(CV_CAP_PROP_FRAME_HEIGHT);

  cout << "largura = " << width << endl;
  cout << "altura  = " << height << endl;

  int histw = nbins, histh = nbins/2;
  Mat histImgR(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgG(histh, histw, CV_8UC3, Scalar(0,0,0));
  Mat histImgB(histh, histw, CV_8UC3, Scalar(0,0,0));

  while(1){
    cap >> image;

    // split() on a BGR frame yields planes[0]=B, planes[1]=G, planes[2]=R,
    // so the red histogram must come from planes[2] and blue from planes[0]
    split(image, planes);
    calcHist(&planes[2], 1, 0, Mat(), histR, 1,
             &nbins, &histrange,
             uniform, accumulate);
    calcHist(&planes[1], 1, 0, Mat(), histG, 1,
             &nbins, &histrange,
             uniform, accumulate);
    calcHist(&planes[0], 1, 0, Mat(), histB, 1,
             &nbins, &histrange,
             uniform, accumulate);

    normalize(histR, histR, 0, histImgR.rows, NORM_MINMAX, -1, Mat());
    normalize(histG, histG, 0, histImgR.rows, NORM_MINMAX, -1, Mat()); // was normalize(histG, histB, ...), which clobbered the blue histogram
    normalize(histB, histB, 0, histImgR.rows, NORM_MINMAX, -1, Mat());

    histImgR.setTo(Scalar(0));
    histImgG.setTo(Scalar(0));
    histImgB.setTo(Scalar(0));
    
    for(int i=0; i<nbins; i++){
      line(histImgR, Point(i, histh),
           Point(i, cvRound(histR.at<float>(i))),
           Scalar(0, 0, 255), 1, 8, 0);
      line(histImgG, Point(i, histh),
           Point(i, cvRound(histG.at<float>(i))),
           Scalar(0, 255, 0), 1, 8, 0);
      line(histImgB, Point(i, histh),
           Point(i, cvRound(histB.at<float>(i))),
           Scalar(255, 0, 0), 1, 8, 0);
    }
    histImgR.copyTo(image(Rect(0, 0       ,nbins, histh)));
    histImgG.copyTo(image(Rect(0, histh   ,nbins, histh)));
    histImgB.copyTo(image(Rect(0, 2*histh ,nbins, histh)));
    imshow("image", image);
    if(waitKey(30) >= 0) break;
  }
  return 0;
}
Example #18
/**
 * Uses moving average to determine change percent.
 *
 * argv[1] = source file or will default to "../../resources/traffic.mp4" if no
 * args passed.
 *
 * @author sgoldsmith
 * @version 1.0.0
 * @since 1.0.0
 */
int main(int argc, char *argv[]) {
	int return_val = 0;
	string url = "../../resources/traffic.mp4";
	string output_file = "../../output/motion-detect-cpp.avi";
	cout << CV_VERSION << endl;
	VideoCapture capture;
	Mat image;
	// See if URL arg passed
	if (argc == 2) {
		url = argv[1];
	}
	cout << "Input file:" << url << endl;
	cout << "Output file:" << output_file << endl;
	capture.open(url);
	// See if video capture opened
	if (capture.isOpened()) {
		cout << "Resolution: " << capture.get(CV_CAP_PROP_FRAME_WIDTH) << "x"
				<< capture.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
		bool exit_loop = false;
		// Video writer
		VideoWriter writer(output_file, (int) capture.get(CAP_PROP_FOURCC),
				(int) capture.get(CAP_PROP_FPS),
				Size((int) capture.get(CAP_PROP_FRAME_WIDTH),
						(int) capture.get(CAP_PROP_FRAME_HEIGHT)));
		Mat work_img;
		Mat moving_avg_img;
		Mat gray_img;
		Mat diff_img;
		Mat scale_img;
		double motion_percent = 0.0;
		int frames_with_motion = 0;
		int frames = 0;
		Scalar color = Scalar(0, 255, 0);
		timeval start_time;
		gettimeofday(&start_time, 0);
		// Process all frames
		while (capture.read(image) && !exit_loop) {
			if (!image.empty()) {
				// Generate work image by blurring
				blur(image, work_img, Size(8, 8));
				// Generate moving average image if needed
				if (moving_avg_img.empty()) {
					moving_avg_img = Mat::zeros(work_img.size(), CV_32FC3);
				}
				// Generate moving average image
				accumulateWeighted(work_img, moving_avg_img, 0.03);
				// Convert the scale of the moving average
				convertScaleAbs(moving_avg_img, scale_img);
				// Subtract the work image frame from the scaled image average
				absdiff(work_img, scale_img, diff_img);
				// Convert the image to grayscale
				cvtColor(diff_img, gray_img, COLOR_BGR2GRAY);
				// Convert to BW
				threshold(gray_img, gray_img, 25, 255, THRESH_BINARY);
				// Total number of changed motion pixels
				motion_percent = 100.0 * countNonZero(gray_img) / image.total();
				// Detect if camera is adjusting and reset reference if more than
				// 25%
				if (motion_percent > 25.0) {
					work_img.convertTo(moving_avg_img, CV_32FC3);
				} else {
					// Threshold trigger motion
					if (motion_percent > 0.75) {
						frames_with_motion++;
						vector<vector<Point> > movement_locations =
								motion_contours(gray_img);
						// Process all points
						for (size_t i = 0, max = movement_locations.size();
								i != max; ++i) {
							Rect bounding_rect = boundingRect(
									movement_locations[i]);
							rectangle(image, bounding_rect.tl(),
									bounding_rect.br(), color, 2, 8, 0);
						}
					}
				}
				// Write frame with motion rectangles
				writer.write(image);
				frames++;
			} else {
				cout << "No frame captured" << endl;
				exit_loop = true;
			}
		}
		timeval end_time;
		gettimeofday(&end_time, 0);
		cout << frames << " frames, " << frames_with_motion
				<< " frames with motion" << endl;
		cout << "FPS " << (frames / (end_time.tv_sec - start_time.tv_sec))
				<< ", elapsed time: " << (end_time.tv_sec - start_time.tv_sec)
				<< " seconds" << endl;
		// Release VideoWriter
		writer.release();
		// Release VideoCapture
		capture.release();
	} else {
		cout << "Unable to open device" << endl;
		return_val = -1;
	}
	return return_val;
}
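The reference image in the motion detector above is an exponentially weighted moving average: per pixel, accumulateWeighted(src, avg, alpha) computes avg = (1 - alpha) * avg + alpha * src. A scalar sketch to make the smoothing concrete:

// Scalar illustration of the accumulateWeighted() update used above.
// With alpha = 0.03 a sudden change bleeds into the average slowly,
// so absdiff(frame, average) highlights recent motion.
double avg = 0.0;
const double alpha = 0.03;
for (int frame = 0; frame < 100; frame++) {
	double sample = (frame < 50) ? 0.0 : 255.0;  // scene change at frame 50
	avg = (1.0 - alpha) * avg + alpha * sample;
}
// 50 frames after the change: avg = 255 * (1 - pow(1 - alpha, 50)) ~ 199,
// still short of 255, which is why the detector resets the reference
// whenever more than 25% of the pixels change at once.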
Example #19
void colorKeyingHSV(const string& videoPath){
	// load the video
	VideoCapture video;
	video.open(videoPath);
	int width = video.get(CV_CAP_PROP_FRAME_WIDTH);
	int height = video.get(CV_CAP_PROP_FRAME_HEIGHT);

	namedWindow("Video");
	namedWindow("Hue");
	createTrackbar("Lower", "Hue", 0, 180);
	setTrackbarPos("Lower", "Hue", lowerHue);
	createTrackbar("Upper", "Hue", 0, 180);
	setTrackbarPos("Upper", "Hue", upperHue);

	namedWindow("Saturation");
	createTrackbar("Select", "Saturation", 0, 255);
	setTrackbarPos("Select", "Saturation", threshSaturation);
	namedWindow("Maske");

	Mat hueFrame(height, width, CV_8UC1);
	Mat saturationFrame(height, width, CV_8UC1);
	Mat mask(height, width, CV_8UC1);

	int frameNumber = 0;
	while(true){
		Mat videoFrame;
		if (video.read(videoFrame) == false){
			break;
		}

		// convert to HSV color space
		Mat hsvFrame;
		cvtColor(videoFrame, hsvFrame, CV_BGR2HSV);	

		// read the current trackbar thresholds
		int threshSaturation = getTrackbarPos("Select", "Saturation");
		int lowerThreshHue = getTrackbarPos("Lower", "Hue");
		int upperThreshHue = getTrackbarPos("Upper", "Hue");

		// analyze pixels
		int sumx = 0;
		int sumy = 0;
		int countWhites = 0;
		for(int x = 0; x < videoFrame.cols; x++){
			for(int y = 0; y < videoFrame.rows; y++){
				Vec3b hsvPixel = hsvFrame.at<Vec3b>(y,x);
				int hue = hsvPixel[0];
				int saturation = hsvPixel[1];

				// masking and centroid accumulation
				if (saturation > threshSaturation && hue > lowerThreshHue && hue < upperThreshHue){
					mask.at<uchar>(y,x) = 255;
					sumx += x;
					sumy += y;
					countWhites++;
				}
				else{
					mask.at<uchar>(y,x) = 0;
				}

				// the following steps are not strictly necessary; they only visualize the two criteria
				if (hue > lowerThreshHue && hue < upperThreshHue){
					hueFrame.at<uchar>(y,x) = 255;
				}
				else{
					hueFrame.at<uchar>(y,x) = 0;
				}
				if (saturation > threshSaturation){
					saturationFrame.at<uchar>(y,x) = 255;
				}
				else{
					saturationFrame.at<uchar>(y,x) = 0;
				}
			}
		}
	
		// compute the centroid
		if (countWhites > 0){
			Point center(sumx/countWhites, sumy/countWhites);
			cross(videoFrame, center, crossLength, colorGreen);
		}
		
		imshow("Hue", hueFrame);
		imshow("Saturation", saturationFrame);
		imshow("Maske", mask);
		imshow("Video", videoFrame);
		waitKey(100);
	}
}
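One caveat in colorKeyingHSV(): the test hue > lower && hue < upper cannot express ranges that wrap around red (e.g. lower = 170, upper = 10 on OpenCV's 0-180 hue scale). A sketch of a wrap-aware check that could replace it (not part of the original):

// Hue range test that also handles wrap-around at the red end.
bool hueInRange(int hue, int lower, int upper)
{
	if (lower <= upper)
		return hue > lower && hue < upper;  // plain range, as in the example
	return hue > lower || hue < upper;      // wrapped range across 180/0
}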
Example #20
int main(int argc, char** argv)
{
    CommandLineParser parser(argc, argv, params);

    if (parser.get<bool>("help"))
    {
        cout << about << endl;
        parser.printMessage();
        return 0;
    }

    String modelConfiguration = parser.get<string>("proto");
    String modelBinary = parser.get<string>("model");

    //! [Initialize network]
    dnn::Net net = readNetFromCaffe(modelConfiguration, modelBinary);
    //! [Initialize network]

    if (parser.get<bool>("opencl"))
    {
        net.setPreferableTarget(DNN_TARGET_OPENCL);
    }

    if (net.empty())
    {
        cerr << "Can't load network by using the following files: " << endl;
        cerr << "prototxt:   " << modelConfiguration << endl;
        cerr << "caffemodel: " << modelBinary << endl;
        cerr << "Models can be downloaded here:" << endl;
        cerr << "https://github.com/chuanqi305/MobileNet-SSD" << endl;
        exit(-1);
    }

    VideoCapture cap;
    if (parser.get<String>("video").empty())
    {
        int cameraDevice = parser.get<int>("camera_device");
        cap = VideoCapture(cameraDevice);
        if(!cap.isOpened())
        {
            cout << "Couldn't find camera: " << cameraDevice << endl;
            return -1;
        }
    }
    else
    {
        cap.open(parser.get<String>("video"));
        if(!cap.isOpened())
        {
            cout << "Couldn't open image or video: " << parser.get<String>("video") << endl;
            return -1;
        }
    }

    Size inVideoSize;
    inVideoSize = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),    //Acquire input size
                       (int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));

    Size cropSize;
    if (inVideoSize.width / (float)inVideoSize.height > WHRatio)
    {
        cropSize = Size(static_cast<int>(inVideoSize.height * WHRatio),
                        inVideoSize.height);
    }
    else
    {
        cropSize = Size(inVideoSize.width,
                        static_cast<int>(inVideoSize.width / WHRatio));
    }

    Rect crop(Point((inVideoSize.width - cropSize.width) / 2,
                    (inVideoSize.height - cropSize.height) / 2),
              cropSize);

    double fps = cap.get(CV_CAP_PROP_FPS);
    int fourcc = static_cast<int>(cap.get(CV_CAP_PROP_FOURCC));
    VideoWriter outputVideo;
    outputVideo.open(parser.get<String>("out") ,
                     (fourcc != 0 ? fourcc : VideoWriter::fourcc('M','J','P','G')),
                     (fps != 0 ? fps : 10.0), cropSize, true);

    for(;;)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera/video or read image

        if (frame.empty())
        {
            waitKey();
            break;
        }

        if (frame.channels() == 4)
            cvtColor(frame, frame, COLOR_BGRA2BGR);

        //! [Prepare blob]
        Mat inputBlob = blobFromImage(frame, inScaleFactor,
                                      Size(inWidth, inHeight), meanVal, false); //Convert Mat to batch of images
        //! [Prepare blob]

        //! [Set input blob]
        net.setInput(inputBlob, "data"); //set the network input
        //! [Set input blob]

        //! [Make forward pass]
        Mat detection = net.forward("detection_out"); //compute output
        //! [Make forward pass]

        vector<double> layersTimings;
        double freq = getTickFrequency() / 1000;
        double time = net.getPerfProfile(layersTimings) / freq;

        Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());

        frame = frame(crop);

        ostringstream ss;
        if (!outputVideo.isOpened())
        {
            ss << "FPS: " << 1000/time << " ; time: " << time << " ms";
            putText(frame, ss.str(), Point(20,20), 0, 0.5, Scalar(0,0,255));
        }
        else
            cout << "Inference time, ms: " << time << endl;

        float confidenceThreshold = parser.get<float>("min_confidence");
        for(int i = 0; i < detectionMat.rows; i++)
        {
            float confidence = detectionMat.at<float>(i, 2);

            if(confidence > confidenceThreshold)
            {
                size_t objectClass = (size_t)(detectionMat.at<float>(i, 1));

                int xLeftBottom = static_cast<int>(detectionMat.at<float>(i, 3) * frame.cols);
                int yLeftBottom = static_cast<int>(detectionMat.at<float>(i, 4) * frame.rows);
                int xRightTop = static_cast<int>(detectionMat.at<float>(i, 5) * frame.cols);
                int yRightTop = static_cast<int>(detectionMat.at<float>(i, 6) * frame.rows);

                ss.str("");
                ss << confidence;
                String conf(ss.str());

                Rect object((int)xLeftBottom, (int)yLeftBottom,
                            (int)(xRightTop - xLeftBottom),
                            (int)(yRightTop - yLeftBottom));

                rectangle(frame, object, Scalar(0, 255, 0));
                String label = String(classNames[objectClass]) + ": " + conf;
                int baseLine = 0;
                Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
                rectangle(frame, Rect(Point(xLeftBottom, yLeftBottom - labelSize.height),
                                      Size(labelSize.width, labelSize.height + baseLine)),
                          Scalar(255, 255, 255), CV_FILLED);
                putText(frame, label, Point(xLeftBottom, yLeftBottom),
                        FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,0));
            }
        }

        if (outputVideo.isOpened())
            outputVideo << frame;

        imshow("detections", frame);
        if (waitKey(1) >= 0) break;
    }

    return 0;
} // main
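The detection example above references several file-scope constants (inWidth, inHeight, WHRatio, inScaleFactor, meanVal, classNames) plus the parser's params and about strings, all defined earlier in the original file. The values below follow the stock OpenCV MobileNet-SSD demo and are reproduced as an assumption, since that header is not part of the snippet:

// Constants the example depends on; values match the stock OpenCV
// MobileNet-SSD sample (assumed, as the original file header is omitted).
const size_t inWidth = 300;
const size_t inHeight = 300;
const float  WHRatio = inWidth / (float)inHeight;
const float  inScaleFactor = 0.007843f;  // 1 / 127.5
const float  meanVal = 127.5;
const char*  classNames[] = {"background", "aeroplane", "bicycle", "bird",
    "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
    "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa",
    "train", "tvmonitor"};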