Example #1
int slow_main(int argc, char *argv[]) {
	cv::VideoCapture vid(0);
	if(!vid.isOpened()) {
		return -1;
	}
	cv::Mat flow, frame;
	cv::UMat gray, prevgray, uflow;
	cv::namedWindow("optflow");
	for(;;) {
		vid >> frame;
		if(frame.empty()) {  // capture can fail; cvtColor on an empty Mat throws
			break;
		}
		cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
		if(!prevgray.empty()) {
			calcOpticalFlowFarneback(prevgray, gray, uflow, 0.5, 3, 15, 3, 5, 1.2, 0);
			uflow.copyTo(flow);
			cv::imshow("optflow", frame);
		}
		cv::Point2f total;
		const int step = 16;
		for(int y = 0; y < flow.rows; y += step) {
			for(int x = 0; x < flow.cols; x += step) {
				total += flow.at<cv::Point2f>(y, x);
			}
		}
		if(sqrt(total.x * total.x + total.y * total.y) > 500) {
			std::cout << total << std::endl;
		}
		if(cv::waitKey(30) >= 0) {
			break;
		} else {
			std::swap(prevgray, gray);
		}
	}
	return 0;
}
Example #2
void OptFlowThread::run()
{
	if( !cap.isOpened() )
		return;

	Mat prevgray;
	Mat gray;
	Mat flow;
	Mat cflow; 
	Mat frame;

	while(true)
	{
		cap >> frame;
		if(frame.empty())
			break;
		cvtColor(frame,gray,COLOR_BGR2GRAY);

		if(prevgray.data)
		{
			//Use Gunnar Farneback algorithm to compute optical flow.
			calcOpticalFlowFarneback(prevgray,gray,flow,0.5,3,15,3,5,1.2,0);
			cvtColor(prevgray,cflow,COLOR_GRAY2BGR);

			//Draw green points.
			drawOptFlowMap(flow,cflow,16,3,Scalar(0,255,0));
			emit NewOptFlowFrame(&cflow);
		}

		std::swap(prevgray,gray);
		waitKey(30);
	}

	return;
}
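drawOptFlowMap() is not shown in Example #2. OpenCV's classic samples/cpp/fback.cpp ships a helper with a matching signature (flow field, canvas, grid step, an unused scale, color), reproduced here as a sketch:

// Sketch of drawOptFlowMap(), modeled on OpenCV's samples/cpp/fback.cpp:
// draws a flow vector and an anchor dot at every step-th pixel of cflowmap.
static void drawOptFlowMap(const Mat& flow, Mat& cflowmap, int step,
                           double /*unused*/, const Scalar& color)
{
    for(int y = 0; y < cflowmap.rows; y += step)
        for(int x = 0; x < cflowmap.cols; x += step)
        {
            const Point2f& fxy = flow.at<Point2f>(y, x);
            line(cflowmap, Point(x, y),
                 Point(cvRound(x + fxy.x), cvRound(y + fxy.y)), color);
            circle(cflowmap, Point(x, y), 2, color, -1);
        }
}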
Example #3
void optic_flow( cv::Mat mGray1, cv::Mat mGray2, cv::Mat& flow, cv::Mat& mAnnotated )
{
	cv::UMat  flowUmat;
	struct timeval start1,end1;	
	start1 = GetTimeStamp();
	calcOpticalFlowFarneback(mGray1, mGray2, 
							flowUmat, 0.5, 3, 15, 3, 5, 1.2, 0);
	flowUmat.copyTo(flow);    

	for (int y = 0; y < mAnnotated.rows; y += 5)
		for (int x = 0; x < mAnnotated.cols; x += 5)
		{
			// flow at position (y, x), scaled by 10 for better visibility
			const cv::Point2f flowatxy = flow.at<cv::Point2f>(y, x) * 10;

			// draw a line along the flow direction
			line(mAnnotated, cv::Point(x, y), cv::Point(cvRound(x + flowatxy.x),
				 cvRound(y + flowatxy.y)), cv::Scalar(255, 0, 0));

			// draw the initial point
			circle(mAnnotated, cv::Point(x, y), 1, cv::Scalar(0, 0, 0), -1);
		}
	end1 = GetTimeStamp();
	// elapsed time in microseconds: seconds scaled by 1e6 plus the microsecond remainder
	float delta = (end1.tv_sec - start1.tv_sec) * 1000000.0f + (end1.tv_usec - start1.tv_usec);
	printf("OpticalFlowFarneback() Duration =  %8.3f ms\n", delta / 1000);

}
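GetTimeStamp() is not defined in this snippet; a minimal sketch, assuming POSIX gettimeofday():

#include <sys/time.h>

// Hypothetical helper consistent with the usage above: wall-clock time
// with microsecond resolution.
static struct timeval GetTimeStamp()
{
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv;
}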
Example #4
bool DenseFlowMatcher::match(const cv::Mat&             img1,
                             const cv::Mat&             img2,
                             std::vector<cv::KeyPoint>& feat1,
                             std::vector<cv::KeyPoint>& feat2,
                             cv::Mat&                   desc1,
                             cv::Mat&                   desc2,
                             std::vector<cv::DMatch>&   matches) const
{
    static const double pyramid_scale = 0.5;
    static const int    levels       = 3;
    static const int    window_size  = 15;
    static const int    iterations   = 3;
    static const int    poly_n       = 5;
    static const double poly_sigma   = 1.1;
    static const int    flags        = 0;

    Mat flow;

    calcOpticalFlowFarneback(
        img1,
        img2,
        flow,
        pyramid_scale,
        levels,
        window_size,
        iterations,
        poly_n,
        poly_sigma,
        flags);
    // NOTE: matches is never populated here, so the function returns false
    // unless the caller pre-filled it; an illustrative completion follows this example.
    return !matches.empty();
}
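A purely illustrative completion (not the author's code), pairing each keypoint in feat1 with its flow-displaced position; these lines would go just before the return statement (std::hypot needs <cmath>):

    // Illustrative only: derive per-keypoint matches from the dense flow field.
    feat2.clear();
    matches.clear();
    for (size_t i = 0; i < feat1.size(); ++i)
    {
        const cv::Point2f& p = feat1[i].pt;
        const cv::Point2f  d = flow.at<cv::Point2f>(cvRound(p.y), cvRound(p.x));
        feat2.push_back(cv::KeyPoint(p + d, feat1[i].size));
        matches.push_back(cv::DMatch((int)i, (int)i, std::hypot(d.x, d.y)));
    }

Example #5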
void OpticalFlowCalculater::doCalc(cv::Mat grayImg){
    if(this->previousFrame.empty()){
        this->previousFrame = grayImg.clone(); // deep copy: the caller may reuse the buffer
        emit this->calcCompete(false, this->flow);
    }else{
        calcOpticalFlowFarneback(this->previousFrame, grayImg, this->flow, 0.5, 3, 40, 3, 7, 1.5, 0);
        this->previousFrame = grayImg.clone(); // advance the reference frame for the next call
        emit this->calcCompete(true, this->flow);
    }
}
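The result is delivered through the calcCompete signal rather than a return value. A hypothetical consumer (class and slot names are illustrative, not from this project) would be wired up like this:

// Hypothetical wiring; FlowConsumer::onFlowReady(bool, cv::Mat) is illustrative.
// For queued connections, cv::Mat must first be registered via qRegisterMetaType.
QObject::connect(calculator, &OpticalFlowCalculater::calcCompete,
                 consumer,   &FlowConsumer::onFlowReady);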
Example #6
void GetTrackedPoints(const mat3b & im1, const mat3b & im2, vector<TrackedPoint> & points_out, 
		      int maxCorners, float qualityLevel, float minDistance, int blockSize,
		      int winSize_, int maxLevel, int criteriaN, float criteriaEps) {
#if 1
  const int useHarrisDetector = 0;
  const float k = 0.04f;
  const Size winSize(winSize_, winSize_);
  const TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
					     criteriaN, criteriaEps);
  const double derivLambda = 0;
  const int flags = 0;
  assert(im1.size() == im2.size());
  matb im1gray;
  cvtColor(im1, im1gray, CV_BGR2GRAY);
#ifdef OPENCV_2_1
  Mat mask;
  vector<Point2f> corners1, corners2;
  vector<uchar> status;
  vector<float> err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      mask, blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < (signed)corners1.size(); ++i)
    if (status[i])
      points_out.push_back(TrackedPoint(corners1[i].x, corners1[i].y,
					corners2[i].x, corners2[i].y));
#else
  Mat corners1, corners2, status, err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      noArray(), blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < corners1.size().height; ++i)
    if (status.at<unsigned char>(i,0))
      points_out.push_back(TrackedPoint(corners1.at<Vec2f>(i,0)[0],corners1.at<Vec2f>(i,0)[1],
					corners2.at<Vec2f>(i,0)[0],corners2.at<Vec2f>(i,0)[1]));
#endif
#else
  matb im1_gray, im2_gray;
  cvtColor(im1, im1_gray, CV_BGR2GRAY);
  cvtColor(im2, im2_gray, CV_BGR2GRAY);
  Mat flow_cv(im1.size().height, im1.size().width, CV_32FC2);
  calcOpticalFlowFarneback(im1_gray, im2_gray, flow_cv, 0.5, 5, 11, 10, 5, 1.1, 0);
  
  points_out.clear();
  for (int i = 20; i < im1.size().height-20; i += 20)
    for (int j = 20; j < im1.size().width-20; j += 20) {
      const Vec2f f = flow_cv.at<Vec2f>(i, j);
      points_out.push_back(TrackedPoint(j, i, j+f[0], i+f[1]));
    }
  cout << "n points " << points_out.size() << endl;
#endif
}
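Note that the derivLambda argument used above exists only in the OpenCV 2.x API; from OpenCV 2.4 onward it was dropped, and calcOpticalFlowPyrLK takes flags followed by an optional minEigThreshold. Under a modern build the equivalent call would be:

  // OpenCV >= 2.4: derivLambda is gone; flags moves up and an optional
  // minEigThreshold (default 1e-4) takes its place.
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err,
                       winSize, maxLevel, criteria, flags);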
Example #7
void calcDenseFlow(string file_name, int bound, int type, int step,
                   vector<vector<uchar> >& output_x,
                   vector<vector<uchar> >& output_y,
                   vector<vector<uchar> >& output_img){

    VideoCapture video_stream(file_name);
    CHECK(video_stream.isOpened())<<"Cannot open video stream \""
                                  <<file_name
                                  <<"\" for optical flow extraction.";

    Mat capture_frame, capture_image, prev_image, capture_gray, prev_gray;
    Mat flow, flow_split[2];


    bool initialized = false;
    for(int iter = 0;; iter++){
        video_stream >> capture_frame;
        if (capture_frame.empty()) break; // read frames until end

        //build mats for the first frame
        if (!initialized){
            initializeMats(capture_frame, capture_image, capture_gray,
                           prev_image, prev_gray);
            capture_frame.copyTo(prev_image);
            cvtColor(prev_image, prev_gray, CV_BGR2GRAY);
            initialized = true;
//            LOG(INFO)<<"Initialized";
        }else if(iter % step == 0){
            capture_frame.copyTo(capture_image);
            cvtColor(capture_image, capture_gray, CV_BGR2GRAY);
            calcOpticalFlowFarneback(prev_gray, capture_gray, flow,
                                     0.702, 5, 10, 2, 7, 1.5,
                                     cv::OPTFLOW_FARNEBACK_GAUSSIAN );

            vector<uchar> str_x, str_y, str_img;
            split(flow, flow_split);
            encodeFlowMap(flow_split[0], flow_split[1], str_x, str_y, bound);
            imencode(".jpg", capture_image, str_img);

            output_x.push_back(str_x);
            output_y.push_back(str_y);
            output_img.push_back(str_img);
//            LOG(INFO)<<iter;

            std::swap(prev_gray, capture_gray);
            std::swap(prev_image, capture_image);
        }
    }

}
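encodeFlowMap() is defined elsewhere in this project; a sketch consistent with the call site, assuming the usual dense-flow convention of discretizing each component to 8 bits within [-bound, bound] before JPEG encoding:

// Hypothetical sketch of encodeFlowMap(): clamp each flow component to
// [-bound, bound], rescale to [0, 255], and JPEG-encode the result.
void encodeFlowMap(const cv::Mat& flow_x, const cv::Mat& flow_y,
                   std::vector<uchar>& encoded_x, std::vector<uchar>& encoded_y,
                   int bound)
{
    cv::Mat img_x, img_y;
    // linear map [-bound, bound] -> [0, 255]; out-of-range values saturate
    flow_x.convertTo(img_x, CV_8UC1, 255.0 / (2 * bound), 127.5);
    flow_y.convertTo(img_y, CV_8UC1, 255.0 / (2 * bound), 127.5);
    cv::imencode(".jpg", img_x, encoded_x);
    cv::imencode(".jpg", img_y, encoded_y);
}

Example #8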
Mat DepthEstimator1::estimateDepth(const LightFieldPicture lightfield)
{
	// render images
	ImageRenderer3 renderer = ImageRenderer3();
	renderer.setLightfield(lightfield);
	renderer.setAlpha(ALPHA);

	const Vec2i leftPosition = Vec2i(-5,0);
	renderer.setPinholePosition(leftPosition);
	Mat image1 = renderer.renderImage();
	cvtColor(image1, image1, CV_RGB2GRAY);
	image1.convertTo(image1, CV_8UC1, 255.0);
		
	const Vec2i rightPosition = Vec2i(5,0);
	renderer.setPinholePosition(rightPosition);
	Mat image2 = renderer.renderImage();
	cvtColor(image2, image2, CV_RGB2GRAY);
	image2.convertTo(image2, CV_8UC1, 255.0);

	// compute optical flow
	Mat opticalFlow;
	calcOpticalFlowFarneback(image1, image2, opticalFlow, 0.5, 3, 15, 3, 5, 1.2, 0);

	Mat flowMap;
	cvtColor(image1, flowMap, CV_GRAY2BGR); // color canvas so the green flow vectors are visible
	drawOptFlowMap(opticalFlow, flowMap, 16, 1.5, Scalar(0, 255, 0));

	/* start of debugging code */
	const int windowFlags = WINDOW_NORMAL;
	const string window1 = "image1";
	namedWindow(window1, windowFlags);
	imshow(window1, image1);

	const string window2 = "image2";
	namedWindow(window2, windowFlags);
	imshow(window2, image2);
	
	const string window3 = "optical flow";
	namedWindow(window3, windowFlags);
	imshow(window3, flowMap);

	//cout << "optical flow = " << opticalFlow << endl;
	
	waitKey(0);
	/* end of debugging code */

	return opticalFlow;
}
Example #9
Point2i SamplingOpticalFlow( const Mat &fir,
		const Mat &sec,
        const Mat &mask,
		vector<Point2f> &pt_src,
		vector<Point2f> &flo)
{
	bool useDense = USEDENSE;
    double scalar = 1;
	Mat src; cvtColor( fir, src, CV_RGB2GRAY);
	Mat dst; cvtColor( sec, dst, CV_RGB2GRAY);
    resize(src, src, Size((int)(src.cols*scalar), (int)(src.rows*scalar))); // Size is (width, height)
    resize(dst, dst, Size((int)(dst.cols*scalar), (int)(dst.rows*scalar)));
    Point2i imgsize(src.cols, src.rows);
    if (!useDense)
	{
		vector<Point2f> pt_dst; vector<uchar> status;vector<float>err;
        pt_src.clear();
        for ( int h = 0; h <src.rows; h+=2)
            for ( int w = 0; w < src.cols; w+=2)
                pt_src.push_back( Point2f(w,h));
        
		pt_dst.reserve( pt_src.size());
		flo.reserve( pt_src.size());
		calcOpticalFlowPyrLK( src, dst, pt_src, pt_dst, status, err);
		for ( uint n = 0; n < pt_src.size(); ++n)
		{
			flo.push_back(Point2f(pt_src[n].x - pt_dst[n].x, pt_src[n].y - pt_dst[n].y));
            if(!status[n] || dist(Point( pt_src[n].x, pt_src[n].y), Point(pt_src[n].x + flo[n].x, pt_src[n].y + flo[n].y)) >= INITPSIZEW)
				flo[n].x = flo[n].y = 0;
		}
	}
	else
	{
		Mat floMat;
		calcOpticalFlowFarneback( src, dst, floMat, 0.75, 3, 30, 10, 5, 1.5, OPTFLOW_FARNEBACK_GAUSSIAN);
		for (int y = 0; y<floMat.rows; y++) {
			for (int x = 0; x < floMat.cols; x++) {
				if ( !mask.at<uchar>(y,x)) // assumes a CV_8U mask
					flo.push_back(floMat.at<Point2f>(y,x)); // CV_32FC2: one Point2f per pixel
				else
					flo.push_back(Point2f(0.0f,0.0f));
			}
		}
	}
    return imgsize;
}
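dist(), USEDENSE, and INITPSIZEW come from elsewhere in this project; a plausible dist() consistent with the call site:

// Hypothetical helper: Euclidean distance between two integer points.
static inline double dist(const cv::Point& a, const cv::Point& b)
{
	const double dx = a.x - b.x;
	const double dy = a.y - b.y;
	return std::sqrt(dx * dx + dy * dy);
}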
Example #10
void camera_process(boost::shared_ptr<cv::Mat> frame) {
	cv::Mat flow;
	// gray and prevgray are presumably file-scope Mats that persist across calls
	cvtColor(*frame, gray, cv::COLOR_BGR2GRAY);
	if(!prevgray.empty()) {
		calcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
	}
	cv::Point2f total;
	const int step = 16;
	for(int y = 0; y < flow.rows; y += step) {
		for(int x = 0; x < flow.cols; x += step) {
			total += flow.at<cv::Point2f>(y, x);
		}
	}
	if(sqrt(total.x * total.x + total.y * total.y) > 500) {
		std::cout << total << std::endl;
	}
	std::swap(prevgray, gray);
}
Example #11
int main( int argc, char* argv[])
{
    //Parameter initialisation
    std::vector<double> para;
    if( argc == 1)
    {
        std::cout << "Reading from input.txt\n";
        try{ para = file::read_input( "input.txt"); }
        catch (toefl::Message& m) 
        {  
            m.display(); 
            throw m;
        }
    }
    else if( argc == 2)
    {
        std::cout << "Reading from "<<argv[1]<<"\n";
        try{ para = file::read_input( argv[1]); }
        catch (toefl::Message& m) 
        {  
            m.display(); 
            throw m;
        }
    }
    else
    {
        std::cerr << "ERROR: Too many arguments!\nUsage: "<< argv[0]<<" [filename]\n";
        return -1;
    }
    omp_set_num_threads( para[20]);
    std::cout<< "With "<<omp_get_max_threads()<<" threads\n";
    const toefl::Parameters p(para);
    field_ratio = p.lx/p.ly;
    if( p.bc_x != toefl::TL_PERIODIC)
    {
        std::cerr << "Only periodic boundaries allowed!\n";
        return -1;
    }
    
    try{p.consistencyCheck();}
    catch( toefl::Message& m){m.display();throw m;}
    p.display(std::cout);
    //construct solvers 
    toefl::DFT_DFT_Solver<3> solver( p);

    // place some gaussian blobs in the field
    try{
        toefl::Matrix<double, toefl::TL_DFT> ne{ p.ny, p.nx, 0.}, nz{ ne}, phi{ ne};
        init_gaussian( ne, p.posX, p.posY, p.blob_width/p.lx, p.blob_width/p.ly, p.amp);
        //init_gaussian_column( nz, 0.6, 0.05/field_ratio, p.imp_amp);
        std::array< toefl::Matrix<double, toefl::TL_DFT>,3> arr3{{ ne, nz, phi}};
        //now set the field to be computed
        solver.init( arr3, toefl::IONS);
    }catch( toefl::Message& m){m.display();}

    cv::VideoCapture cap(0);
    if( !cap.isOpened())
    {
        std::cerr << "Camera not found\n";
        return -1;
    }
    cap.set( CV_CAP_PROP_FRAME_WIDTH,  p.nx);
    cap.set( CV_CAP_PROP_FRAME_HEIGHT, p.ny);
    cv::Mat last, current, flow, vel(p.ny, p.nx, CV_32F);
    std::vector<cv::Mat> v;

    cv::namedWindow("Current",cv::WINDOW_NORMAL);
    cv::namedWindow("Velocity",cv::WINDOW_NORMAL);
    double t = 0.;
    toefl::Timer timer;
    toefl::Timer overhead;
    solver.first_step();
    solver.second_step();
    t+= 2*p.dt;
    toefl::Matrix<double, toefl::TL_DFT> src( p.ny, p.nx, 0.);
    cv::Mat grey, colored, show( p.ny, p.nx, CV_32F);
    cap >> last;
    cv::cvtColor( last, last, CV_BGR2GRAY); //convert colors

    while( true)
    {
        init_gaussian( src, 0.5+0.25*sin(t), 0.75, p.blob_width/p.lx, p.blob_width/p.ly, p.amp);
        cap >> current; // get a new frame from camera
        cv::cvtColor(current, current, CV_BGR2GRAY); //convert colors
        cv::GaussianBlur(current, current, cv::Size(21,21), 0, 0); //Kernel size, sigma_x, sigma_y
        calcOpticalFlowFarneback(last, current, flow, 0.5, 1, 5, 3,  5, 1.2, 0);
        cv::split( flow, v);
        //first index y, second index x
        for( int i=0; i<v[0].rows; i++)
            for( int j=0; j<v[0].cols; j++)
                vel.at<float>( i,j) = sqrt( v[0].at<float>(i,j)*v[0].at<float>(i,j) + v[1].at<float>(i,j)*v[1].at<float>(i,j) );
        for( int i=0; i<vel.rows; i++)
            for( int j=0; j<vel.cols; j++)
                if( vel.at<float>(i,j) < 1) vel.at<float>(i,j) = 0;
        //scale velocity to 1 in order to account for distance from camera
        double min, max;
        cv::minMaxLoc( vel, &min, &max);
        std::cout << min <<" "<<max<<std::endl;
        if( max > 1) // if someone is there
            for( int i=0; i<vel.rows; i++)
                for( int j=0; j<vel.cols; j++)
                    vel.at<float>( i,j) /= max;
        cv::flip( vel, vel, +1);
        //for( unsigned i=0; i<src.rows(); i++)
        //    for( unsigned j=0; j<src.cols(); j++)
        //        src(i,j) = 0.5*vel.at<double>(i,j);
        overhead.tic();
        //const toefl::Matrix<double, toefl::TL_DFT>& field = solver.getField( toefl::IMPURITIES); 
        const toefl::Matrix<double, toefl::TL_DFT>& field = solver.getField( toefl::ELECTRONS); 
        for( unsigned i=0; i<p.ny; i++)
            for( unsigned j=0; j<p.nx; j++)
                show.at<float>(i,j) = (float)field(i,j);
        cv::minMaxLoc( show, &min, &max);
        show.convertTo(grey, CV_8U, 255.0/(2.*max), 255.0/2.);
        cv::minMaxLoc( grey, &min, &max);

        //cv::applyColorMap( grey, colored, cv::COLORMAP_BONE);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_COOL);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_HOT);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_HSV);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_JET);
        cv::applyColorMap( grey, colored, cv::COLORMAP_OCEAN); 
        //cv::applyColorMap( grey, colored, cv::COLORMAP_PINK);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_RAINBOW);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_SPRING);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_SUMMER);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_AUTUMN);
        //cv::applyColorMap( grey, colored, cv::COLORMAP_WINTER);
        window_str << std::setprecision(2) << std::fixed;
        window_str << "time = "<<t;
        //cv::addText( colored, window_str.str(), cv::Point(50,50));
        window_str.str(""); 
        std::cout << colored.rows << " " << colored.cols<<"\n";
        std::cout << vel.rows << " " << vel.cols<<"\n";
        std::cout << show.rows << " " << show.cols<<"\n";
        std::cout << src.rows() << " " << src.cols()<<"\n";
        cv::imshow("Current", colored);
        //for( unsigned i=0; i<src.rows(); i++)
            //for( unsigned j=0; j<src.cols(); j++)
                //show.at<double>(i,j) = src(i,j);
        cv::imshow("Velocity", vel);


        timer.tic();
        for(unsigned i=0; i<p.itstp; i++)
        {
            toefl::Matrix<double, toefl::TL_DFT> voidmatrix( 2,2,(bool)toefl::TL_VOID);
            solver.step(src );
            t+= p.dt;
        }
        timer.toc();
        overhead.toc();

        //swap fields
        cv::Mat temp = last;
        last = current;
        current = temp;
        if(cv::waitKey(30) >= 0) break;
    }
    ////////////////////////////////glfw and opencv//////////////////////////////
    std::cout << "Average time for one step =                 "<<timer.diff()/(double)p.itstp<<"s\n";
    std::cout << "Overhead for visualisation, etc. per step = "<<(overhead.diff()-timer.diff())/(double)p.itstp<<"s\n";
    //////////////////////////////////////////////////////////////////
    fftw_cleanup();
    return 0;

}
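A side note on Example #11: the two nested loops that fill vel can be collapsed into library calls; a sketch using cv::magnitude and cv::threshold (behavior differs only for pixels exactly equal to 1):

// Per-pixel flow magnitude, then zero everything below the threshold.
cv::magnitude(v[0], v[1], vel);                       // requires CV_32F inputs
cv::threshold(vel, vel, 1.0, 0.0, cv::THRESH_TOZERO); // keep values > 1, zero the rest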
Example #12
void OpticalFlowFarneback::calc(InputArray I0, InputArray I1, InputOutputArray flow)
{
    calcOpticalFlowFarneback(I0, I1, flow, pyrScale, numLevels, winSize, numIters, polyN, polySigma, flags);
}
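Example #12 hides the free function behind a class interface. Since OpenCV 3 the library itself ships an equivalent class, so a caller could also write (a sketch assuming OpenCV >= 3.0; prevGray/gray are 8-bit single-channel frames):

// OpenCV 3+ equivalent of the wrapper above, with the same calc() interface.
cv::Ptr<cv::FarnebackOpticalFlow> fb = cv::FarnebackOpticalFlow::create(
    3 /*numLevels*/, 0.5 /*pyrScale*/, false /*fastPyramids*/, 15 /*winSize*/,
    3 /*numIters*/, 5 /*polyN*/, 1.2 /*polySigma*/, 0 /*flags*/);
fb->calc(prevGray, gray, flow);

Example #13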
void gazeEvaluatorThread::run()
{
// // 1  - Get the image
// ImageOf<PixelRgb> *imageIn = inPort->read(false);

// if(imageIn!=NULL)
// {
//     // Wrap the input image into a cv::Mat
//     cv::Mat matIn((IplImage*)imageIn->getIplImage());

//     // Prepare the output port
//     ImageOf<PixelRgb> imageOut;

//     // Resize the output image to be equal to the input image
//     imageOut.resize(*imageIn);

//     // Wrap the output image into a cv::Mat
//     cv::Mat matOut((IplImage*)imageOut.getIplImage());

//     // Copy the input mat into the output mat
//     matOut=matIn.clone();

//     // Send data
//     outPort->prepare()=imageOut;
//     outPort->write();   
// }
// 

    if(!optFlow.empty())
        optFlow.setTo(Scalar(0));

    if (isStarting)
    {
        ImageOf<PixelRgb> *tmp = inPort->read(false);

        if(tmp!=NULL)
        {
            imageInNext = *tmp;
            imageInPrev =  imageInNext;
            isStarting  = false;
            printMessage(0,"Starting..\n");
        }
    }
    else
    {
        // 1  - Get the image
        ImageOf<PixelRgb> *tmp = inPort->read(false);
        if(tmp!=NULL)
        {
            imageInNext = *tmp;
        }

        cv::Mat imgInNext((IplImage*)imageInNext.getIplImage());
        cv::Mat imgInPrev((IplImage*)imageInPrev.getIplImage());

        // 2B - Smooth it out
        cv::boxFilter(imgInNext, imgInNext, -1, cv::Size(4,3));

        // 4  - Make it gray
        Mat imgPrevGray;
        Mat imgNextGray;
        cvtColor(imgInPrev,imgPrevGray,CV_RGB2GRAY);
        cvtColor(imgInNext,imgNextGray,CV_RGB2GRAY);

        // 5 - Compute the optical flow
        if(!optFlow.empty())
            optFlow.setTo(Scalar(0));
        
        calcOpticalFlowFarneback(imgPrevGray,imgNextGray,optFlow,0.5,5,9,5,7,1.5,0);

        imageInPrev=imageInNext;

        if (!optFlow.empty() && !imgInPrev.empty())
        {
            sendOptFlow();
        }
    }
}
Example #14
/*	Member function
 * returns the detected gesture ID
 * Inputs :		frame -> current frame to analyse
 */
airGestType airGest::analyseGesture( Mat frame ) {
	
	airGestType gest = GEST_INVALID;
	
	resize( frame,
			frame,
			Size( OPTFLW_FRAME_WIDTH, OPTFLW_FRAME_HEIGHT ),
			0,
			0,
			INTER_AREA );
	
	//Prepare canvas to draw intermediate result
	//canvas = frame.clone();
	canvas = Mat( OPTFLW_FRAME_HEIGHT, OPTFLW_FRAME_WIDTH, CV_8UC3, Scalar( 0, 0, 0 ) );
	//accCanvas = Mat( OPTFLW_FRAME_HEIGHT, OPTFLW_FRAME_WIDTH, CV_8UC3, Scalar( 0, 0, 0 ) );
	
	Mat grayFrame;
	//Convert to gray scale
    if( frame.channels() == 3 ) {
		cvtColor( frame, grayFrame, CV_BGR2GRAY );
	}
	else if( frame.channels() == 4 ) {
		cvtColor( frame, grayFrame, CV_BGRA2GRAY );
	}
	else {	//already gray
		grayFrame = frame.clone();
	}
	
	if( currState != AIRGEST_ACTIVE ) { //unless active, return with an invalid code
		prevFrame = grayFrame.clone();
		return gest;
	}
	
	//Here, airGest is active
	currFrame = grayFrame;
	//~ std::cout << "[airGest::analyseGesture] calculating optical flow....";
	calcOpticalFlowFarneback( prevFrame,  // first 8-bit single-channel input image
                              currFrame,  // second image of the same size and type as prevFrame
                              flowMap,    // computed flow image; same size as prevFrame, type CV_32FC2
                              0.5,        // pyr_scale: 0.5 means a classical pyramid
                              3,          // number of pyramid layers, including the initial image
                              20,         // winSize
                              3,          // number of iterations the algorithm does at each pyramid level
                              5,          // size of the pixel neighborhood used to find the polynomial expansion
                              1.1,        // standard deviation of the Gaussian used to smooth derivatives
                              OPTFLOW_FARNEBACK_GAUSSIAN );
    //~ std::cout << "[COMPLETED]\n";
    //~ std::cout << "-> boxFilter";
    boxFilter( flowMap, flowMap , -1, BLUR_KERNEL_SIZE );
    //~ std::cout << "-> drawFlowMap";
    drawFlowMap();
    //~ std::cout << "-> filterFlow";
    filterFlow();
    
    //imshow( "Current flow", canvas );
    //imshow( "Accumulated Flow", accCanvas );
	
	//copy current frame to prev for using next time
	prevFrame = currFrame.clone();
	
	gest = ( decision == -1.00 )? GEST_PREV:
		   ( decision == +1.00 )? GEST_NEXT:
		   GEST_INVALID;
	
	return gest;
}
Example #15
File: testApp.cpp Project: stdmtb/3s1e
//--------------------------------------------------------------
void testApp::setup()
{
	ofImage imageOf1, imageOf2;			//Load openFrameworks' images
	imageOf1.loadImage("crater1.png");
	imageOf2.loadImage("crater2.png");

	color1.setFromPixels( imageOf1 );	//Convert to ofxCv images
	color2.setFromPixels( imageOf2 );

	float decimate = 0.3;              //Decimate images to 30%
	ofxCvColorImage imageDecimated1;
	imageDecimated1.allocate( color1.width * decimate, 
                          color1.height * decimate );
	//High-quality resize
	imageDecimated1.scaleIntoMe( color1, CV_INTER_AREA );
	gray1 = imageDecimated1;

	ofxCvColorImage imageDecimated2;
	imageDecimated2.allocate( color2.width * decimate,
		                      color2.height * decimate );
	//High-quality resize
	imageDecimated2.scaleIntoMe( color2, CV_INTER_AREA );
	gray2 = imageDecimated2;
	

	Mat img1( gray1.getCvImage() );  //Create OpenCV images
	Mat img2( gray2.getCvImage() );
	Mat flow;                        //Image for flow
	//Computing optical flow
	  calcOpticalFlowFarneback( img1, img2, flow, 0.7, 3, 11, 5, 5, 1.1, 0 );
	//Split flow into separate images
	vector<Mat> flowPlanes;
	split( flow, flowPlanes );
	//Copy float planes to ofxCv images flowX and flowY
	IplImage iplX( flowPlanes[0] );
	flowX = &iplX;
	IplImage iplY( flowPlanes[1] );
	flowY = &iplY;

	//--------------------------------------------------------------------------
	//ATTENTION: Lines flowX = &iplX; and flowY = &iplY; can raise runtime error, 
	//caused by small bug in ofxOpenCV. 
	//So before running the example, fix it, as it described in testApp.h file
	//--------------------------------------------------------------------------

	w = gray1.width;
	h = gray1.height;

	//Flow image
	planeX = flowX;
	planeY = flowY;

	//create idX, idy
	idX.allocate( w, h );
	idY.allocate( w, h );
	for (int y=0; y<h; y++) {
		for (int x=0; x<w; x++) {
			idX.getPixelsAsFloats()[ x + w * y ] = x;
			idY.getPixelsAsFloats()[ x + w * y ] = y;
		}
	}

	//Load checkerboard image
	ofImage imageTest;
	imageTest.loadImage("checkerBoard.png");
	colorTest.setFromPixels( imageTest );

	//Make morphing at first time
	morphValue = 0;
	morphImageIndex = 1;
	updateMorph( morphValue, morphImageIndex );
}
Example #16
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	int rad = running_average_background.depth();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
    Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))//1800))
    {
 		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Skip every second frame so the flow is greater.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				sprintf( frame_str, "Frame = %d", frame_count);
 				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}
	
		// Static background image
		Mat difference_frame, binary_difference;
		Mat structuring_element(3,3,CV_8U,Scalar(1));
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,binary_difference,MORPH_OPEN,structuring_element);
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		else
		{
			binary_difference.setTo(Scalar(0,0,0));
		    current_frame.copyTo(binary_difference, thresholded_image);
		}
		timer->recordTime("Static difference");

		// Running Average (three channel version)
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		split(running_average_difference,running_average_planes);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
    	invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
 		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
 		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
        gmm->apply(current_frame, foreground_mask);
		// Clean the resultant binary (moving pixel) mask using an opening.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
 		foreground_image.setTo(Scalar(0,0,0));
        current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
        Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
        gmm->getBackgroundImage(mean_background_image);

		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		waitKey(delay);
		
		char frame_str[100];
		sprintf( frame_str, "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
        imshow("Static Background Model", static_output );
 		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
 		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
        imshow("Selective Running Average Background Model", selective_output );
 		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
        imshow("Median Background Model", median_output );
  		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
        imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
	 	surveillance_video >> current_frame;
	}
	destroyAllWindows();
}
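Example #16 leans on several helpers from its accompanying course library (JoinImagesHorizontally, MedianBackground, Timestamper, LucasKanadeOpticalFlow, drawOpticalFlow, invertImage). Most are project-specific, but invertImage is presumably a simple mask inversion; a sketch:

// Hypothetical stand-in for the invertImage() helper used above.
void invertImage(const cv::Mat& input, cv::Mat& output)
{
	cv::bitwise_not(input, output);  // per-element 255 - pixel
}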