void KeypointTracker::trackOpticalFlow(const Mat &img){
  // Tracks the previous frame's keypoints into `img` with pyramidal
  // Lucas-Kanade optical flow, keeping only points that survive a
  // forward-backward consistency check.
  //
  // Side effects: rewrites curr_pts / curr_labels with the surviving points
  // and their labels, and stores the new image pyramid in prev_pyr for the
  // next call.
  vector<uchar> status,status_backp;
  vector<float> err,err_backp;
  vector<Point2f> predict_pts,back_predict;
  vector<Mat> img_pyr;
  buildOpticalFlowPyramid(img,img_pyr,winSize,5,true);
  if(!prev_pyr.empty()){
    // Forward flow: previous frame -> current frame.
    calcOpticalFlowPyrLK(prev_pyr, img_pyr, prev_pts, predict_pts, status, err, winSize, 5, termcrit, 0, 0.001);
    // Backward flow: current frame -> previous frame (consistency check).
    calcOpticalFlowPyrLK(img_pyr, prev_pyr,predict_pts, back_predict, status_backp, err_backp, winSize, 5, termcrit, 0, 0.001);

    curr_pts.clear();
    curr_labels.clear();

    // Maximum tolerated round-trip displacement (pixels).
    const double kMaxFbError = 0.5;

    for (size_t pt_idx=0; pt_idx < back_predict.size() ; pt_idx++){
      // Require BOTH flows to have succeeded before trusting the round-trip
      // error; previously the backward status was ignored, so a failed
      // backward track could pass the check on garbage coordinates.
      if(status[pt_idx] == 1 && status_backp[pt_idx] == 1 &&
         norm(back_predict[pt_idx] - prev_pts[pt_idx]) < kMaxFbError){
        curr_pts.push_back(predict_pts[pt_idx]);
        curr_labels.push_back(prev_labels[pt_idx]);
      }
    }
  }
  prev_pyr=img_pyr;
}
示例#2
0
void SyntheticTrack::trackCorners(const cv::Mat& rendered_image,
                                  const cv::Mat& next_img,
                                  std::vector<cv::Point2f>& prev_pts,
                                  std::vector<cv::Point2f>& next_pts) {
    // Detects FAST corners in rendered_image and tracks them into next_img
    // with LK optical flow, validated by a forward-backward consistency
    // check.  Accepted correspondences are appended to prev_pts/next_pts.
    vector<KeyPoint> kps;
    FAST(rendered_image, kps, 7, true);

    if (kps.empty()) return;

    vector<Point2f> tmp_next_pts, tmp_prev_pts, backflow_pts;

    tmp_prev_pts.reserve(kps.size());
    // const ref: the old `auto kp` copied every KeyPoint.
    for (const auto& kp : kps) tmp_prev_pts.push_back(kp.pt);

    vector<uchar> next_status, prev_status;
    vector<float> next_errors, prev_errors;

    // Forward flow (rendered -> next) and backward flow (next -> rendered).
    calcOpticalFlowPyrLK(rendered_image, next_img, tmp_prev_pts, tmp_next_pts,
                         next_status, next_errors);
    calcOpticalFlowPyrLK(next_img, rendered_image, tmp_next_pts, backflow_pts,
                         prev_status, prev_errors);

    // size_t avoids the old signed/unsigned loop comparison.
    for (size_t i = 0; i < tmp_prev_pts.size(); ++i) {
        float error = getDistance(backflow_pts[i], tmp_prev_pts[i]);  // round-trip error
        float speed = getDistance(tmp_prev_pts[i], tmp_next_pts[i]);  // displacement magnitude

        // Require BOTH flows to have succeeded (the forward status was
        // previously ignored, letting untracked points through), a small
        // round-trip error, and a plausible displacement.
        if (next_status[i] == 1 && prev_status[i] == 1 && error < 5 && speed < 50) {
            // const int& id = ids[i];
            prev_pts.push_back(tmp_prev_pts[i]);
            next_pts.push_back(tmp_next_pts[i]);
        }
    }
}
示例#3
0
// Detects good features (corners) in im1 and tracks them into im2 with
// pyramidal Lucas-Kanade optical flow; each successfully tracked pair is
// appended to points_out as TrackedPoint(x1, y1, x2, y2).
// The "#if 1" selects the sparse LK implementation; the final "#else" branch
// is a compiled-out dense Farneback alternative sampled on a 20-pixel grid.
void GetTrackedPoints(const mat3b & im1, const mat3b & im2, vector<TrackedPoint> & points_out, 
		      int maxCorners, float qualityLevel, float minDistance, int blockSize,
		      int winSize_, int maxLevel, int criteriaN, float criteriaEps) {
#if 1
  const int useHarrisDetector = 0;  // 0: Shi-Tomasi min-eigenvalue measure
  const float k = 0.04f;            // Harris free parameter (unused when useHarrisDetector == 0)
  const Size winSize(winSize_, winSize_);
  const TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
					     criteriaN, criteriaEps);
  const double derivLambda = 0;     // legacy OpenCV 2.x LK parameter
  const int flags = 0;
  assert(im1.size() == im2.size());
  matb im1gray;
  cvtColor(im1, im1gray, CV_BGR2GRAY);  // corner detection runs on grayscale
#ifdef OPENCV_2_1
  // OpenCV 2.1 API path: point containers are std::vectors.
  Mat mask;
  vector<Point2f> corners1, corners2;
  vector<uchar> status;
  vector<float> err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      mask, blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < (signed)corners1.size(); ++i)
    if (status[i])  // keep only points that were tracked successfully
      points_out.push_back(TrackedPoint(corners1[i].x, corners1[i].y,
					corners2[i].x, corners2[i].y));
#else
  // Newer API path: points live in Mats with one Vec2f per row.
  Mat corners1, corners2, status, err;
  goodFeaturesToTrack(im1gray, corners1, maxCorners, qualityLevel, minDistance,
		      noArray(), blockSize, useHarrisDetector, k);
  calcOpticalFlowPyrLK(im1, im2, corners1, corners2, status, err, winSize, maxLevel,
		       criteria, derivLambda, flags);
  for (int i = 0; i < corners1.size().height; ++i)
    if (status.at<unsigned char>(i,0))
      points_out.push_back(TrackedPoint(corners1.at<Vec2f>(i,0)[0],corners1.at<Vec2f>(i,0)[1],
					corners2.at<Vec2f>(i,0)[0],corners2.at<Vec2f>(i,0)[1]));
#endif
#else
  // Dense alternative (compiled out): Farneback flow sampled every 20 px,
  // skipping a 20 px border.
  matb im1_gray, im2_gray;
  cvtColor(im1, im1_gray, CV_BGR2GRAY);
  cvtColor(im2, im2_gray, CV_BGR2GRAY);
  Mat flow_cv(im1.size().height, im1.size().width, CV_32FC2);
  calcOpticalFlowFarneback(im1_gray, im2_gray, flow_cv, 0.5, 5, 11, 10, 5, 1.1, 0);
  
  points_out.clear();
  for (int i = 20; i < im1.size().height-20; i += 20)
    for (int j = 20; j < im1.size().width-20; j += 20) {
      const Vec2f f = flow_cv.at<Vec2f>(i, j);
      points_out.push_back(TrackedPoint(j, i, j+f[0], i+f[1]));
    }
  cout << "n points " << points_out.size() << endl;
#endif
}
示例#4
0
/**
Tracks features using KLT optical flow.

@param fp previous frame
@param fc current frame
@param pPrev in: candidate points from the previous frame; out: matched points compacted to the front
@param pCurr in: initial position guesses; out: matched tracked points compacted to the front
@param cornerCount number of candidate points supplied by the caller
@return number of successfully matched point pairs
*/
int InterframeRegister::TrackFeatures(Mat fp, Mat fc,vector<Point2f> &pPrev, vector<Point2f> &pCurr, int cornerCount)
{
	// allocate memory
	vector <uchar> status;
	status.reserve(cornerCount);
	// err is (re)allocated by calcOpticalFlowPyrLK itself; the old
	// Mat::reserve(cornerCount) on an empty Mat was pointless.
	Mat err;
	vector<Point2f> tempPrev = pPrev;
	vector<Point2f> tempCurr = pCurr;

	// calculate optical flow
	calcOpticalFlowPyrLK(fp, fc,tempPrev, tempCurr, status,err, KLT_SEARCH_WIN,4, m_critTerm, OPTFLOW_LK_GET_MIN_EIGENVALS);

	// calcOpticalFlowPyrLK sizes `status` to the number of input points,
	// which may be smaller than cornerCount; clamping the loop bound
	// prevents the out-of-bounds reads the old code performed when fewer
	// points than cornerCount were passed in.
	int limit = cornerCount;
	if ((int)status.size() < limit)
		limit = (int)status.size();

	// copy matched points to array
	int nMatchedPts = 0;
	for (int ind = 0; ind < limit; ind++)
	{
		if (status[ind] != 0x00)
		{
			pPrev[nMatchedPts] = tempPrev[ind];
			pCurr[nMatchedPts] = tempCurr[ind];
			++nMatchedPts;
		}
	}
	return nMatchedPts;
}
示例#5
0
/**
 * Fallback marker localisation: when the normal detection pipeline yields
 * nothing, predict where the previously detected marker's corners moved by
 * pushing them through sparse LK optical flow (dense flow is too slow).
 *
 * @param prev_frame previous captured frame, 8UC1 / grayscale
 * @param frame current frame, also 8UC1 / grayscale
 * @param marker previously detected Marker; its poly points are tracked and,
 *               on success, replaced with the predicted positions
 * @return true if optical flow produced a plausible candidate marker
 **/
bool MarkerDetector::opticalFlowPrediction( Mat& prev_frame, Mat& frame, Marker& marker ) {
    Mat status, error;
    vector<Point2f> tracked;

    calcOpticalFlowPyrLK( prev_frame, frame, marker.poly, tracked, status, error );

    /* All four corners must have been tracked successfully */
    if ( sum( status ) != Scalar(4) )
        return false;

    /* The tracked quad must still satisfy our candidate requirements */
    if ( !checkPoints( tracked ))
        return false;

    /* Reject markers that have drifted (partially) off-screen */
    for ( size_t i = 0; i < tracked.size(); ++i ) {
        const Point2f& p = tracked[i];
        if ( p.x <= 0 || p.y <= 0 || p.x >= frame.cols || p.y >= frame.rows )
            return false;
    }

    /* Adopt the predicted corners as the marker's new polygon */
    marker.poly.assign( tracked.begin(), tracked.end() );

    return true;
}
// One Median-Flow iteration: tracks a uniform grid of points inside oldBox
// from oldImage to newImage, filters the correspondences with
// forward-backward and NCC checks, then votes on the new box position.
// Returns false when the track is lost (no surviving points, or the
// displacement spread is too large).
bool TrackerMedianFlowImpl::medianFlowImpl(Mat oldImage,Mat newImage,Rect2d& oldBox){
    std::vector<Point2f> pointsToTrackOld,pointsToTrackNew;

    Mat oldImage_gray,newImage_gray;
    cvtColor( oldImage, oldImage_gray, COLOR_BGR2GRAY );
    cvtColor( newImage, newImage_gray, COLOR_BGR2GRAY );

    //"open ended" grid
    // pointsInGrid x pointsInGrid samples placed at the centres of equal
    // cells tiling oldBox.
    for(int i=0;i<params.pointsInGrid;i++){
        for(int j=0;j<params.pointsInGrid;j++){
                pointsToTrackOld.push_back(
                        Point2f((float)(oldBox.x+((1.0*oldBox.width)/params.pointsInGrid)*j+.5*oldBox.width/params.pointsInGrid),
                        (float)(oldBox.y+((1.0*oldBox.height)/params.pointsInGrid)*i+.5*oldBox.height/params.pointsInGrid)));
        }
    }

    std::vector<uchar> status(pointsToTrackOld.size());
    std::vector<float> errors(pointsToTrackOld.size());
    // Forward flow: old frame -> new frame.
    calcOpticalFlowPyrLK(oldImage_gray, newImage_gray,pointsToTrackOld,pointsToTrackNew,status,errors,Size(3,3),5,termcrit,0);
    dprintf(("\t%d after LK forward\n",(int)pointsToTrackOld.size()));

    // Raw displacements of the successfully tracked points, captured BEFORE
    // the FB/NCC filtering below; used later for the dispersion check.
    std::vector<Point2f> di;
    for(int i=0;i<(int)pointsToTrackOld.size();i++){
        if(status[i]==1){
            di.push_back(pointsToTrackNew[i]-pointsToTrackOld[i]);
        }
    }

    // Two independent filters fill/refine filter_status.
    std::vector<bool> filter_status;
    check_FB(oldImage_gray,newImage_gray,pointsToTrackOld,pointsToTrackNew,filter_status);
    check_NCC(oldImage_gray,newImage_gray,pointsToTrackOld,pointsToTrackNew,filter_status);

    // filter
    // Erase rejected correspondences in place (note the index rollback
    // after each erase so no element is skipped).
    for(int i=0;i<(int)pointsToTrackOld.size();i++){
        if(!filter_status[i]){
            pointsToTrackOld.erase(pointsToTrackOld.begin()+i);
            pointsToTrackNew.erase(pointsToTrackNew.begin()+i);
            filter_status.erase(filter_status.begin()+i);
            i--;
        }
    }
    dprintf(("\t%d after LK backward\n",(int)pointsToTrackOld.size()));

    if(pointsToTrackOld.size()==0 || di.size()==0){
        return false;
    }
    // Median vote over the surviving correspondences moves/scales the box.
    Point2f mDisplacement;
    oldBox=vote(pointsToTrackOld,pointsToTrackNew,oldBox,mDisplacement);

    // Failure heuristic: if the median residual displacement (after
    // removing the voted global displacement) exceeds 10 px, declare loss.
    std::vector<double> displacements;
    for(int i=0;i<(int)di.size();i++){
        di[i]-=mDisplacement;
        displacements.push_back(sqrt(di[i].ddot(di[i])));
    }
    if(getMedian(displacements,(int)displacements.size())>10){
        return false;
    }

    return true;
}
/* Calculation of Homography for flow based
 *
 **/
void AlignmentMatrixCalc::flowBasedHomography()
{
    std::vector<uchar>status;
    std::vector<float>err;
    std::vector<cv::Point2f>tempPrev;
    std::vector<cv::Point2f>tempCurrent;

    isHomographyCalc=false;

    pointsCurrent.clear();

    // flow calculation
    calcOpticalFlowPyrLK(prevFrame, currentFrame, pointsPrev, pointsCurrent, status, err);

    qDebug()<<"\n\n Prev Frame Features: "<<pointsPrev.size();

    // filtering flow points by threshold
    for (unsigned int i=0; i < pointsPrev.size(); i++)
    {
        if(status[i] && err[i] <= flowErrorThreshold)
        {
            tempPrev.push_back(pointsPrev[i]);
            tempCurrent.push_back(pointsCurrent[i]);
        }
        qDebug()<<status[i]<<" "<<err[i]<<"\n";
    }
    qDebug()<<"After Flow Filtered Features : "<<tempCurrent.size();

    // if enough flow points exist
    if(tempPrev.size() >= 4 && tempCurrent.size() >= 4)
    {
        homography = cv::findHomography(tempPrev, tempCurrent, homographyCalcMethod,
                                        ransacReprojThreshold);
        /*
         cv::findHomography can return empty matrix in some cases.
         This seems happen only when cv::RANSAC flag is passed.
         check the computed homography before using it
         */
        if(!homography.empty())
        {
            if(isHomographyValid()) //
            {
                isHomographyCalc = true;
            }
        }


    }
    if(isHomographyCalc == false)
    {
        // if valid homography not calculated returns to a second stage....
        stage=secondPass;

    }




}
示例#8
0
// ------------------------------------------------------
// Advances the tracked point set by one frame: runs LK optical flow from
// prevGray into the new frame, estimates the inter-frame homography H12,
// draws the tracked points, compacts out lost points, and optionally
// inserts a user-requested point (addRemovePt / pt).  Finally swaps
// current/previous state for the next call.
void FlowFinder::updateTrackingPoints( Image& frame )
{
	assert(frame.channels()==1);   // expects a single-channel (grayscale) frame
	
	frame.cvImg.copyTo( gray );
	
	if( !prev_points.empty() )
	{
		vector<float> err;
		if(prevGray.empty())
			gray.copyTo(prevGray);   // very first frame: track against itself
		
		calcOpticalFlowPyrLK(prevGray, gray, prev_points, curr_points, status, err, winSize,
							 3, termcrit, 0);
		
		// NOTE(review): this findHomography overload appears to take
		// `status` as the OUTPUT inlier mask, so the LK tracking flags are
		// overwritten by RANSAC inlier flags and the loops below filter on
		// inliers — confirm this is intended.
		H12 = cv::findHomography(cv::Mat(prev_points), cv::Mat(curr_points), status, cv::RANSAC, 4);
		
		size_t i, k;
		// Draw every surviving point and its motion segment.
		for( i = 0; i < curr_points.size(); i++ )
		{
			if( !status[i] )
				continue;
			
			circle( frame.cvImg, curr_points[i], 3, cv::Scalar(0,255,0), -1, 8);
			line( frame.cvImg, curr_points[i], prev_points[i], cv::Scalar(0,0,255), 1);
		}
		
		
		// Compact surviving points to the front.  A pending user click
		// (addRemovePt) landing within 5 px of an existing point removes
		// that point and consumes the click instead of adding a new one.
		for( i = k = 0; i < curr_points.size(); i++ )
		{
			if( addRemovePt && norm(pt - curr_points[i]) <= 5)
			{
				addRemovePt = false;
				continue;
			}
			
			if( !status[i] )
				continue;
			
			curr_points[k++] = curr_points[i];
			//circle( frame->color,curr_points[i], 3, cv::Scalar(0,255,0), -1, 8);
			//line( frame->color, curr_points[i], prev_points[i], cv::Scalar(0,0,255), 1);
		}

		curr_points.resize(k);
	}
	
	// A still-pending click adds a new point (refined to sub-pixel
	// accuracy), as long as the point budget allows it.
	if( addRemovePt && curr_points.size() < (size_t)MAX_COUNT )
	{
		vector<cv::Point2f> tmp;
		tmp.push_back(pt);
		cornerSubPix( gray, tmp, winSize, cvSize(-1,-1), termcrit);
		curr_points.push_back(tmp[0]);
		addRemovePt = false;
	}
	
	std::swap(curr_points, prev_points);
	cv::swap(gray, prevGray);
}
	// prevFrame: gray-scale image of the previous frame
	// grayFrame: gray-scale image of the current frame
	// points: input and output points to track
	// currToKeyTrans / keyToTopTrans: homography chain (current frame -> key frame -> top view)
	// Estimates the homography for the current frame: projects the pitch
	// model into the image, periodically (every 60 frames) re-locates
	// corners to re-calibrate, then tracks the projected points with LK
	// optical flow and updates the homography chain.
	void estimateHomography(const SoccerPitchData &data, Mat &prevFrame, Mat &grayFrame, vector<Point2f> &points, Mat &currToKeyTrans, Mat &keyToTopTrans, bool &initialized) {
		Mat topToCurrTrans;
		invert(keyToTopTrans * currToKeyTrans, topToCurrTrans);
		vector<Point2f> trackedPitchPoints;
		perspectiveTransform(data.pitchPoints, trackedPitchPoints, topToCurrTrans);		

		// delete points that are outside of image
		vector<Point2f> pitchPoints;
		removeOutsiders(data, trackedPitchPoints, pitchPoints);
		
		if (!prevFrame.empty()) {
			// relocate corners (anti-drift re-calibration, every 60 frames)
			if (frameCounter >= 60) {
				if (frameCounter == 60) {
					// fresh relocation cycle: reset accumulated state
					relocatedCorners.clear();
					relocatedPitchPoints.clear();
					// NOTE(review): 28 presumably matches the pitch-point
					// count — confirm against SoccerPitchData.
					reprojErr = vector<double>(28, DBL_MAX);
				}
				cout<<"relocating corners"<<endl;
				findGoodCorners2(grayFrame, data, currToKeyTrans, keyToTopTrans);
				if (!relocatedCorners.empty())
					perspectiveTransform(relocatedCorners, relocatedCorners, currToPrevTrans);
				if (relocatedCorners.size() >= 6) {
					// recalib homography from the relocated correspondences
					if (relocatedCorners.size() == 12)
						cout<<"stop"<<endl;  // NOTE(review): leftover debug print
					keyToTopTrans = findHomography(relocatedCorners, relocatedPitchPoints, RANSAC, 1);
					// current frame becomes the new key frame
					currToKeyTrans = Mat::eye(Size(3, 3), CV_64FC1);
					trackedPitchPoints = relocatedCorners;
					pitchPoints = relocatedPitchPoints;
					frameCounter = 0;
					relocateCounter = 0;
				}
				else{
					// not enough corners found: try again next frame
					frameCounter++;
					relocateCounter++;
				}
			}
			else 
				frameCounter++;

			cout<<"doing optical flow tracking"<<endl; 
			// track the projected pitch points from the previous frame into
			// the current one
			calcOpticalFlowPyrLK(prevFrame, grayFrame, trackedPitchPoints, currPts, status, err, Size(21, 21), 3);
			
			// chain: current -> previous -> key frame
			currToPrevTrans = findHomography(currPts, trackedPitchPoints, RANSAC, 1);
			currToKeyTrans = currToPrevTrans * currToKeyTrans;

			// refresh the key-frame -> top-view homography from the tracked points
			vector<Point2f> keyPts;
			perspectiveTransform(currPts, keyPts, currToKeyTrans);
			Mat temp = findHomography(keyPts, pitchPoints, RANSAC, 1);
			if (!temp.empty())
				keyToTopTrans = temp.clone();
			else
				initialized = false;  // estimation failed: request re-initialization
			points = currPts;		
		}
		prevFrame = grayFrame;
	}
示例#10
0
void Tracker::track(const Mat im_prev, const Mat im_gray, const vector<Point2f> & points_prev,
        vector<Point2f> & points_tracked, vector<unsigned char> & status)
{
    FILE_LOG(logDEBUG) << "Tracker::track() call";

    if (points_prev.size() > 0)
    {
        vector<float> err; //Needs to be float

        //Calculate forward optical flow for prev_location
        calcOpticalFlowPyrLK(im_prev, im_gray, points_prev, points_tracked, status, err);

        vector<Point2f> points_back;
        vector<unsigned char> status_back;
        vector<float> err_back; //Needs to be float

        //Calculate backward optical flow for prev_location
        calcOpticalFlowPyrLK(im_gray, im_prev, points_tracked, points_back, status_back, err_back);

        //Traverse vector backward so we can remove points on the fly
        for (int i = points_prev.size()-1; i >= 0; i--)
        {
            float l2norm = norm(points_back[i] - points_prev[i]);

            bool fb_err_is_large = l2norm > thr_fb;

            if (fb_err_is_large || !status[i] || !status_back[i])
            {
                points_tracked.erase(points_tracked.begin() + i);

                //Make sure the status flag is set to 0
                status[i] = 0;
            }

        }

    }

    FILE_LOG(logDEBUG) << "Tracker::track() return";
}
void OpticalFlow::calculate(Mat prev, Mat img, Rect lastRect, bool clear) {
    // Advances the tracked point set from `prev` to `img` and recomputes the
    // tracked region; `clear` resets the bounding box and point set first.
    if (clear) {
        data.boundingBox.width = 0;
        data.boundingBox.height = 0;
        data.points->clear();
    }

    const bool canTrack = !data.points[0].empty() && !data.blocked;
    if (canTrack) {
        vector<uchar> trackStatus;
        vector<float> trackError;
        // Flow points[0] (previous frame) into points[1] (current frame).
        calcOpticalFlowPyrLK(prev, img, data.points[0], data.points[1], trackStatus, trackError,
                             data.winSize, 5, data.termcrit, 0, 0.001);
        resetNotFoundPoints(trackStatus, lastRect);
        movePoints(lastRect.width, lastRect.height);
        calculateRegion();
    }
}
示例#12
0
File: OCRer.cpp  Project: peaceleven/pad
// Estimates per-feature displacement between img1 and img2: detects corners
// in img1, refines them to sub-pixel accuracy, then tracks them into img2
// with pyramidal Lucas-Kanade flow.  Tracking flags and per-point errors are
// returned through *status and *err (the tracked positions themselves are
// discarded by this function).
void calcImageDisplacement(Mat img1, Mat img2, vector<uchar> *status, vector<float> *err) {
	vector<Point2f> corners1, corners2;
	goodFeaturesToTrack(img1, corners1, N_CORNERS, FEATURE_QUALITY, MIN_DISTANCE);

	// Featureless frame: leave the outputs empty instead of handing empty
	// point sets to cornerSubPix / calcOpticalFlowPyrLK.
	if (corners1.empty()) {
		status->clear();
		err->clear();
		return;
	}

	int nIterations = 30;
	double epsilon = .01;  // was misspelt "epislon" (with a pointless float literal)
	TermCriteria tc (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, nIterations, epsilon);
	Size winSize = Size(3, 3);
	Size zeroZone = Size(-1, -1);  // (-1,-1) disables the dead zone
	cornerSubPix(img1, corners1, winSize, zeroZone, tc);

	int maxLevel = 3;  // pyramid depth for LK
	calcOpticalFlowPyrLK(img1, img2, corners1, corners2,
	                     (*status), (*err), winSize, maxLevel);
}
示例#13
0
// Computes optical flow from `fir` to `sec`, writing one flow vector per
// sample into `flo` (and, in the sparse path, the sample grid into pt_src).
// Returns the working image size as (width, height).
Point2i SamplingOpticalFlow( const Mat &fir,
		const Mat &sec,
        const Mat &mask,
		vector<Point2f> &pt_src,
		vector<Point2f> &flo)
{
	bool useDense = USEDENSE;
	double scalar = 1;  // working-resolution scale factor
	Mat src; cvtColor( fir, src, CV_RGB2GRAY);
	Mat dst; cvtColor( sec, dst, CV_RGB2GRAY);
	// cv::Size is (width, height) = (cols, rows).  The previous code passed
	// (rows, cols), transposing the target size and distorting any
	// non-square frame.
	resize(src, src, Size((int)(src.cols*scalar), (int)(src.rows*scalar)));
	resize(dst, dst, Size((int)(dst.cols*scalar), (int)(dst.rows*scalar)));
	Point2i imgsize(src.cols, src.rows);

	if (!useDense)
	{
		// Sparse path: LK flow on a regular grid sampled every 2 pixels.
		vector<Point2f> pt_dst; vector<uchar> status;vector<float>err;
		pt_src.clear();
		for ( int h = 0; h <src.rows; h+=2)
			for ( int w = 0; w < src.cols; w+=2)
				pt_src.push_back( Point2f(w,h));

		pt_dst.reserve( pt_src.size());
		flo.reserve( pt_src.size());
		calcOpticalFlowPyrLK( src, dst, pt_src, pt_dst, status, err);
		for ( uint n = 0; n < pt_src.size(); ++n)
		{
			// NOTE: flow is stored as (src - dst), i.e. pointing backwards.
			flo.push_back(Point2f(pt_src[n].x - pt_dst[n].x, pt_src[n].y - pt_dst[n].y));
			// Zero out vectors that failed to track or are implausibly long.
			if(!status[n] || dist(Point( pt_src[n].x, pt_src[n].y), Point(pt_src[n].x + flo[n].x, pt_src[n].y + flo[n].y)) >= INITPSIZEW)
				flo[n].x = flo[n].y = 0;
		}
	}
	else
	{
		// Dense path: Farneback flow, masked per pixel.
		Mat floMat;
		calcOpticalFlowFarneback( src, dst, floMat, 0.75, 3, 30, 10, 5, 1.5, OPTFLOW_FARNEBACK_GAUSSIAN);
		for (int y = 0; y<floMat.rows; y++) {
			for (int x = 0; x < floMat.cols; x++) {
				// NOTE(review): floMat is CV_32FC2, so at<float>(y, 2*x) /
				// at<float>(y, 2*x+1) address the x/y components byte-wise;
				// mask.at<int>(y,x) assumes a CV_32S mask — confirm the
				// caller's mask type.
				if ( !mask.at<int>(y,x))
					flo.push_back(Point2f(floMat.at<float>(y,2*x), floMat.at<float>(y,2*x+1)));
				else
					flo.push_back(Point2f(0.0f,0.0f));
			}
		}
	}
	return imgsize;
}
示例#14
0
File: sensor.c  Project: samfoo/sentry
void step_tracking() {
    if (features_current.size() < MIN_FEATURES_SIZE) {
        tracking_object = false;
    }

    if (tracking_object &&
            tracking_density() < MIN_DENSITY_BEFORE_RETRACK &&
            tracking_density() > 0) {
        movement_buffer.setTo(cv::Scalar::all(0));
        cv::circle(movement_buffer, features_center, 50, cv::Scalar(255, 255, 255),
                CV_FILLED);

        tracking_object = false;
    }

    if (!tracking_object) {
        cv::goodFeaturesToTrack(
                image_current,
                features_current,
                20, 0.02, 4,
                movement_buffer);

        tracking_object = true;
        return;
    }

    std::vector<cv::Point2f> features_prev = features_current;
    std::vector<uchar> features_status;
    std::vector<float> features_error;

    if (features_current.size() > 0) {
        std::vector<cv::Point2f> next = std::vector<cv::Point2f>();
        features_current = std::vector<cv::Point2f>();

        calcOpticalFlowPyrLK(image_prev, image_current,
                features_prev, next,
                features_status, features_error);

        for (int i=0; i < features_status.size(); i++) {
            if (features_status[i]) {
                features_current.push_back(next[i]);
            }
        }
    }
}
示例#15
0
// Tracks a set of 8 cross-marker points from previmg into lastimg.
// If the 8 reference points are not yet established, (re)detects them with
// CrossDetect; otherwise tracks prev_pts forward with LK optical flow and
// keeps only the successfully tracked points (and their corresponding
// originals) in prev_pts / orig_pts.
void ProcessingThread::OpticalFlowHandle(Mat &previmg, Mat lastimg, vector<Point2f> &prev_pts, vector<Point2f> &orig_pts)
{
	DBG_InitOutputImage();
	DBG_CreateOutputFromImage(lastimg);
	Mat nextimg, mask, m_error;
	vector<Point2f> next_pts, tracked_pts, orig_pts_new;
	vector<uchar> m_status;

	cvtColor(lastimg, nextimg, CV_BGR2GRAY);
	if (orig_pts.size() != 8)  // marker set incomplete: (re)detect
	{
		prev_pts.clear();
		
		//goodFeaturesToTrack(nextimg, prev_pts, 10, 0.05, 0.2, mask);
		if (CrossDetect(nextimg, prev_pts))
		{
			cvtColor(lastimg, previmg, CV_BGR2GRAY);
			orig_pts = prev_pts;
		}
	}
	else
	{
		//cvtColor(lastimg, nextimg, CV_BGR2GRAY);
		if (prev_pts.size() > 0 && !previmg.empty())
		{
			calcOpticalFlowPyrLK(previmg, nextimg, prev_pts, next_pts, m_status, m_error);
		}
		// size_t avoids the old signed/unsigned comparison; the unused
		// local `int j = 1;` was removed.
		for (size_t i = 0; i < m_status.size(); i++)
		{
			if (m_status[i])
			{
				tracked_pts.push_back(next_pts[i]);
				DBG_DrawOutputCircle(next_pts[i]);
				orig_pts_new.push_back(orig_pts[i]);
			}
		}

		orig_pts = orig_pts_new;
		prev_pts = tracked_pts;
		nextimg.copyTo(previmg);
		DBG_WriteFrame(dh_out);
	}
}
示例#16
0
void MainWindow::doMotionTrack()
{
    static const cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    static cv::Mat gray, prevGray;

    static std::vector<cv::Point2f> points[2];

    if (!rawImage.empty()) {
        try {
            cv::cvtColor(rawImage, gray, CV_RGB2GRAY);
            rawImage.copyTo(transformedImage);

            if (init) {
                init = false;
                cv::goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 1);
                cv::cornerSubPix(gray, points[1],cv::Size(5,5), cv::Size(-1,-1),termcrit);
                qDebug() << "Doing flow init, found " << points[1].size() << " interesting points.";
            }
            else if( !points[0].empty() ) {
                std::vector<uchar> status;
                std::vector<float> err;
                if(prevGray.empty())
                    gray.copyTo(prevGray);
                calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err);
                size_t i, k;
                for(i = k = 0; i < points[1].size(); i++ ) {
                    if( !status[i] )
                        continue;
                    points[1][k++] = points[1][i];
                    cv::circle( transformedImage, points[1][i], 3, cv::Scalar(255,0,0));
                }
                points[1].resize(k);
            }
            std::swap(points[1], points[0]);
            cv::swap(prevGray, gray);
        }
        catch (cv::Exception e) {
            qDebug() << e.what();
            transform = NONE;
        }
    }
}
示例#17
0
// Tracks the fixed point grid from the previous frame into the current one,
// bins each point's motion (distance, angle) into buckets, and draws the
// flow vectors that do NOT belong to the dominant bucket — presumably the
// motion that deviates from the global/camera motion (confirm intent).
void OpticalFlow::computeOpticalFlow(Mat& cf){
	Mat current_frame;
	cvtColor(cf, current_frame, COLOR_BGR2GRAY);
	if( points[0].empty() ){
		buildPointGrid(current_frame);   // lazy init of the tracking grid
	}
	if (buckets.empty()){
		buildBuckets(6, 30.0, 10);       // lazy init of the motion-histogram buckets
		cout <<"first bucket angle :" << buckets[0].angle_max << endl;
	}
	if( !previous_frame.empty() ){
		vector<double> distance(points[0].size()), angle(points[0].size());
		vector<uchar> status;
		vector<float> err;

		calcOpticalFlowPyrLK(previous_frame, current_frame, points[0],
					points[1], status, err, winSize, 3, termcrit, 0, 0.01);	
		for( int i = 0; i < points[1].size(); i++ ){
                	if( !status[i] )
                    		continue;
			distance[i] = computeDistance(points[0][i], points[1][i]);
			angle[i] = computeAngle(points[0][i], points[1][i]);
			// NOTE: status is repurposed here — after this loop it holds
			// assignBucket()'s return value, not the LK tracking flag.
			status[i] = assignBucket(distance[i], angle[i]);
	    	}
		Bucket max_bucket = maxBucket();
		cout << max_bucket.getCount() << endl;
		// Draw only motion outside the dominant bucket.
		for( int i = 0; i < points[1].size(); i++ ){
                	if( !status[i])
                    		continue;
			if(!max_bucket.inBucket(distance[i], angle[i])){
				drawFlow(points[0][i], points[1][i], false, cf);
			}
	    	}
		//debug	
		if ( debug_ ){
			imshow(windowName, previous_frame);
		}
	}
	// update for next frame
	previous_frame = current_frame; // current frame becomes previous frame.
}
// Estimates the global (whole-frame) transform between img1 and img2:
// detects features in both frames, tracks img1's features into img2 with LK
// optical flow, then fits a robust (Welsch) transform to the point pairs.
void FullFrameTransform::getWholeFrameTransform(Mat img1, Mat img2){
	FeaturesInfo fi1 = extractFeaturesToTrack(img1);
	FeaturesInfo fi2 = extractFeaturesToTrack(img2);

	int length = max(fi1.features.size(), fi2.features.size());
	
	std::vector<uchar> features_found; 
	features_found.reserve(length);
	std::vector<float> feature_errors; 
	feature_errors.reserve(length);

	// NOTE(review): fi2.features is used as the LK OUTPUT here, so the
	// features detected in img2 above are overwritten by the tracked
	// positions of fi1's features — confirm this is intentional.
	calcOpticalFlowPyrLK( fi1.pyramid, fi2.pyramid, fi1.features, fi2.features, features_found, feature_errors ,
		Size( WIN_SIZE, WIN_SIZE ), 5,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 30, 0.01 ), 0 );

	// features_found.size() is the number of point pairs (LK resizes it to
	// the input point count); the commented-out fitters are alternatives
	// kept for reference.
	//wholeFrameTransform = densityWeightedSvd(fi1.features, fi2.features, features_found.size());
	//wholeFrameTransform = prunedNonWeightedSvd(fi1.features, fi2.features, features_found.size());
	wholeFrameTransform = WelschFit(fi1.features, fi2.features, features_found.size());
	//wholeFrameTransform = RansacNonWeightedSvd(fi1.features, fi2.features, features_found.size());
	//wholeFrameTransform = nonWeightedSvd(fi1.features, fi2.features, features_found.size());
}
示例#19
0
	//--------------------------------------------------
	// Tracks the previous image's keypoints into this image with LK optical
	// flow; surviving keypoints (with updated positions) become this image's
	// feature set.  When more than three points exist, H is set to the
	// homography mapping the previous points to the tracked ones.
	// Returns the updated feature set.
	vector<KeyPoint> MatchableImage::updateFeatures( MatchableImage& previous, Mat& H )
	{	
		vector<Point2f> prevPts; KeyPoint::convert(previous.features, prevPts);
		int nPrevPts = prevPts.size();
		vector<Point2f> nextPts;
		vector<uchar> status;
		vector<float> err;
		calcOpticalFlowPyrLK(previous.cvImage, cvImage, prevPts, nextPts, status, err);

		features.clear();
		for(int i=0; i<nPrevPts; i++)
		{
			if(status[i]>0)
			{
				// Carry over the keypoint's metadata, update its position.
				KeyPoint feature = previous.features[i];
				feature.pt = nextPts[i];
				features.push_back( feature );
			}
		}
		// (removed: unused local "pctPtsLost", which truncated a float
		//  fraction into an int and divided by zero when nPrevPts == 0)

		if(nPrevPts>3)
		{
			Mat prevMat = points2mat(prevPts);
			Mat nextMat = points2mat(nextPts);

			// NOTE: all point pairs are passed here, including ones whose
			// LK status was 0; findHomography overwrites `status` with its
			// RANSAC inlier mask.
			H = findHomography(prevMat, nextMat, status, CV_RANSAC, 2);
			int ptsUsedInHomography=0;
			for(int i=0; i<(int)status.size(); i++)
			{
				if(status[i]>0) ptsUsedInHomography++;
			}
			//log(LOG_LEVEL_DEBUG, "%f%% points used in homography", (ptsUsedInHomography/(float)npts)*100);
		}
		return features;
	}
// Tracks facial "dot" points from prevGray into gray with LK optical flow,
// restricting processing to an expanded face region.  Returns the tracked
// point positions (one per input point).
vector<Point2f> track_dots(IplImage* gray, IplImage* prevGray, vector<Point2f> points, CvRect* faceBox)
{
	//expands region to search for dots
	// Grow faceBox to twice its size around its centre, clamped to the image
	// bounds.  Note faceBox is modified in place for the caller.
	faceBox->x -= faceBox->width/2;
	if(faceBox->x < 0)
	{
		faceBox->x = 0;
	}
	faceBox->width *= 2;
	if(faceBox->x + faceBox->width > gray->width)
	{
		faceBox->width = gray->width - faceBox->x;
	}
	faceBox->y -= faceBox->height/2;
	if(faceBox->y < 0)
	{
		faceBox->y = 0;
	}
	faceBox->height *= 2;
	if(faceBox->y + faceBox->height > gray->height)
	{
		faceBox->height = gray->height - faceBox->y;
	}

	// Limit optical flow to the expanded face region.
	// NOTE(review): `points` appear to be in full-image coordinates while
	// the ROI makes LK operate on the sub-image — confirm the caller
	// compensates for the ROI offset.
	cvSetImageROI(gray,*faceBox);
	cvSetImageROI(prevGray,*faceBox);

	vector<Point2f> tempPoints;
	vector<uchar> status;
    vector<float> err;
	// Single iteration, single pyramid level (maxLevel = 0): a very cheap
	// small-motion tracker.
	TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,1,0.3);
	Size winSize(20,20);

    calcOpticalFlowPyrLK(prevGray, gray, points, tempPoints, status, err, winSize, 0, termcrit, 0);

	cvResetImageROI(gray);
	cvResetImageROI(prevGray);
	return tempPoints;
}
示例#21
0
// Visualises sparse LK optical flow: detects up to 500 Shi-Tomasi corners in
// the previous frame, refines them to sub-pixel accuracy, tracks them into
// the current frame, and draws each motion vector (red dot at the previous
// position, green line to the current one) onto display_image.
void LucasKanadeOpticalFlow(Mat& previous_gray_frame, Mat& gray_frame, Mat& display_image)
{
	Size img_sz = previous_gray_frame.size();
	int win_size = 10;
	cvtColor(previous_gray_frame, display_image, CV_GRAY2BGR);

	const int MAX_CORNERS = 500;
	vector<Point2f> previous_features;
	vector<Point2f> current_features;
	goodFeaturesToTrack(previous_gray_frame, previous_features, MAX_CORNERS, 0.05, 5, noArray(), 3, false, 0.04);
	cornerSubPix(previous_gray_frame, previous_features, Size(win_size, win_size), Size(-1,-1),
	             TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

	vector<uchar> features_found;
	calcOpticalFlowPyrLK(previous_gray_frame, gray_frame, previous_features, current_features, features_found, noArray(),
	                     Size(win_size*4+1,win_size*4+1), 5,
	                     TermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ));

	// Draw only the points that were tracked successfully.
	for (size_t idx = 0; idx < previous_features.size(); ++idx)
	{
		if (features_found[idx])
		{
			circle(display_image, previous_features[idx], 1, Scalar(0,0,255));
			line(display_image, previous_features[idx], current_features[idx], Scalar(0,255,0));
		}
	}
}
// Forward-backward consistency filter for Median Flow: tracks newPoints back
// into oldImage and flags (in `status`) the points whose round-trip error is
// below the median round-trip error of all points.
void TrackerMedianFlowImpl::check_FB(const Mat& oldImage,const Mat& newImage,
        const std::vector<Point2f>& oldPoints,const std::vector<Point2f>& newPoints,std::vector<bool>& status){

    // An empty status vector means "all points are still candidates".
    if(status.size()==0){
        status=std::vector<bool>(oldPoints.size(),true);
    }

    std::vector<uchar> LKstatus(oldPoints.size());
    std::vector<float> errors(oldPoints.size());
    std::vector<double> FBerror(oldPoints.size());
    std::vector<Point2f> pointsToTrackReprojection;
    // Backward flow: new frame -> old frame.
    calcOpticalFlowPyrLK(newImage, oldImage,newPoints,pointsToTrackReprojection,LKstatus,errors,Size(3,3),5,termcrit,0);

    // Round-trip error per point (computed for every point — LKstatus is
    // not consulted here).
    for(int i=0;i<(int)oldPoints.size();i++){
        FBerror[i]=l2distance(oldPoints[i],pointsToTrackReprojection[i]);
    }
    double FBerrorMedian=getMedian(FBerror);
    dprintf(("point median=%f\n",FBerrorMedian));
    dprintf(("FBerrorMedian=%f\n",FBerrorMedian));
    // NOTE: this overwrites any prior contents of `status` rather than
    // AND-ing with them.
    for(int i=0;i<(int)oldPoints.size();i++){
        status[i]=(FBerror[i]<FBerrorMedian);
    }
}
示例#23
0
// Visualises frame-to-frame motion: detects corners in frame1, refines them
// to sub-pixel accuracy, tracks them into frame2 with LK optical flow, and
// draws a red line for each tracked correspondence in "result window".
void MotionDetection(cv::Mat frame1, cv::Mat frame2)
{
    cv::Mat prev, next;
    cvtColor(frame1, prev, CV_BGR2GRAY); 
    cvtColor(frame2, next, CV_BGR2GRAY); 
    goodFeaturesToTrack( prev, 
            corners,
            maxCorners,
            qualityLevel,
            minDistance,
            cv::Mat(),
            blockSize,
            useHarrisDetector,
            k );
    // Featureless frame: nothing to refine or track.
    if (corners.empty())
        return;
    cornerSubPix(prev, 
            corners,
            cvSize( 10, 10 ) ,
            cvSize( -1, -1 ), 
            cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
    std::vector<uchar> features_found;
    features_found.reserve(maxCorners);
    std::vector<float> feature_errors;
    feature_errors.reserve(maxCorners);
    calcOpticalFlowPyrLK(prev, next, corners, corners_b, features_found, 
            feature_errors, cvSize( 10, 10 ), 5, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0);
    IplImage g = next;
    // goodFeaturesToTrack may find fewer than maxCorners points; the old
    // loop iterated to maxCorners regardless and read past the end of
    // corners/corners_b.  Iterate over what was actually detected and skip
    // points the tracker lost.
    for( size_t i = 0; i < corners.size(); ++i )
    {
        if( !features_found[i] )
            continue;
        CvPoint p0 = cvPoint( cvRound( corners[i].x ), cvRound( corners[i].y ) );
        CvPoint p1 = cvPoint( cvRound( corners_b[i].x ), cvRound( corners_b[i].y ) );
        cvLine( &g, p0, p1, CV_RGB(255,0,0), 3, CV_AA );
    }
    cv::Mat rs(&g);
    imshow( "result window", rs );  
    int key = cv::waitKey(5);
}
示例#24
0
// Tracks feature points frame-to-frame with pyramidal Lucas-Kanade flow,
// draws the tracks onto `output`, and derives a coarse motion command.
//
// frame:  current BGR frame (input).
// output: copy of `frame` annotated with tracks, control/tracking
//         rectangles and direction cues (output).
// Returns a direction code: 0 = none, 1 = up, 2 = down, 3 = left,
// 4 = right (as encoded by the dirx/diry branches below).
// Uses member state: gray, gray_prev, features, points[2], initial,
// status, err, curx, cury, lens, edge, dirx, diry, first,
// max_count, qlevel, minDist, and the helpers acceptTrackedPoint/trend.
int TrackThread::process(Mat &frame,Mat &output)
{
	int re=0;
	cvtColor (frame,gray,CV_BGR2GRAY);
	frame.copyTo (output);
	// Re-detect feature points every frame.
	// (The original condition — re-detect only when too few points
	// remain — is commented out below.)
	//if(points[0].size ()<=10)
	{
		goodFeaturesToTrack (gray,// input image
			features,// detected feature points (output)
			max_count,// maximum number of features
			qlevel,// quality level
			minDist);// minimum allowed distance between features
		// Insert the detected feature points (disabled: the sets are
		// simply replaced instead of appended to).
		//	points[0].insert (points[0].end (),features.begin (),features.end ());
		//	initial.insert (initial.end (),features.begin (),features.end ());

		points[0]=features;
		initial=features;
	}
	// First frame: no previous grayscale image yet, so track against
	// the current frame itself.
	if(gray_prev.empty ()){
		gray.copyTo (gray_prev);
	}
	// Estimate where the previous frame's feature points have moved to
	// in the current frame (default LK window is 15x15).
	calcOpticalFlowPyrLK (
		gray_prev,// previous grayscale frame
		gray,// current grayscale frame
		points[0],// feature positions in the previous frame
		points[1],// estimated positions in the current frame (output)
		status,// per-point flag: tracked successfully or not
		err);// per-point patch difference between frames; large values
		     // indicate violent motion and can be used to drop points

	int k = 0;
	// Compact the arrays in place, keeping only points accepted by
	// acceptTrackedPoint (i.e. discard points that did not move).
	for(int i=0;i<points[1].size();i++){
		if(acceptTrackedPoint (i))
		{
			initial[k]=initial[i];
			points[1][k++] = points[1][i];
		}
	}
	points[1].resize (k);
	initial.resize (k);
	// Visualise the surviving tracked points.

	for(int i=0;i<points[1].size ();i++)
	{
		// Line from the point's initial position to its current position.
		line(output,initial[i],points[1][i],Scalar(20,150,210));
		// Circle at the current position.
		circle(output,points[1][i],3,Scalar(120,250,10));
	}
	if(points[1].size()!=0)
		trend(output);
	// Decay the accumulated (centripetal) velocity toward zero.
	curx*=0.9;
	cury*=0.9;
	lens=min(output.rows,output.cols)/8;
	edge=min(output.rows,output.cols)/5;
	// Control region (inner rectangle).
	rectangle(output,
		Point(output.cols/2-lens,output.rows/2-lens),
		Point(output.cols/2+lens,output.rows/2+lens),
		Scalar(1,100,200),3);
	// Tracking region (outer rectangle).
	rectangle(output,
		Point(edge,edge),
		Point(output.cols-edge,output.rows-edge),
		Scalar(10,210,1),3);
	if(first){
		// Highlight the edge of the control box matching the detected
		// horizontal/vertical direction and encode it in `re`.
		if(dirx==1){
			re=4;
			line(output,
				Point(output.cols/2+lens,output.rows/2-lens),
				Point(output.cols/2+lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		else if(dirx==-1){
			re=3;
			line(output,
				Point(output.cols/2-lens,output.rows/2-lens),
				Point(output.cols/2-lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		if(diry==1){
			re=2;
			line(output,
				Point(output.cols/2-lens,output.rows/2+lens),
				Point(output.cols/2+lens,output.rows/2+lens),
				Scalar(20,250,110),5);
		}
		else if(diry==-1){
			re=1;
			line(output,
				Point(output.cols/2-lens,output.rows/2-lens),
				Point(output.cols/2+lens,output.rows/2-lens),
				Scalar(20,250,110),5);
		}
	}
	// Marker showing the current accumulated velocity offset.
	circle(output,Point(output.cols/2+curx,output.rows/2+cury),10,Scalar(2,1,250),3);
	// Prepare the feature set and grayscale image for the next frame.
	std::swap(points[1],points[0]);
	cv::swap(gray_prev,gray);
	return re;
}
示例#25
0
// Estimates the dominant hand-motion direction between two frames.
// Features detected in m1 are tracked into m2 with pyramidal LK; each
// displacement whose endpoints lie inside the given hand boundaries
// votes for up/down/left/right/none, and the majority vote wins.
//
// m1, m2:                     consecutive BGR frames.
// old/new_hand_boundary:      hand ROIs; points outside either are ignored.
// Returns "up", "down", "left", "right" or "none" (also stored in the
// final_direction member).
string optical_flow::get_final_direction(Mat m1, Mat m2,
		Rect old_hand_boundary, Rect new_hand_boundary) {

	left_count = 0;
	up_count = 0;
	down_count = 0;
	right_count = 0;
	non_count = 0;

	TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);

	cvtColor(m1, old_gray_img, COLOR_BGR2GRAY);//convert to gray
	cvtColor(m2, new_gray_img, COLOR_BGR2GRAY);

	// extract features
	goodFeaturesToTrack(old_gray_img, old_frame_points, max_count, .01,
			min_distance, Mat(), block_size, 0, .04);

	// BUG FIX: cornerSubPix/calcOpticalFlowPyrLK assert on an empty point
	// set; with no features there is no observable motion.
	if (old_frame_points.empty()) {
		final_direction = "none";
		return final_direction;
	}

	cornerSubPix(old_gray_img, old_frame_points, Size(10, 10), Size(-1, -1),
			termcrit);

	// track features in next frame
	vector<uchar> status;
	vector<float> err;
	calcOpticalFlowPyrLK(old_gray_img, new_gray_img, old_frame_points,
			new_frame_points, status, err, Size(10, 10), 3, termcrit, 0, 0.001);

	for (unsigned int i = 0; i < new_frame_points.size(); i++) {
		// BUG FIX: skip points the tracker failed to follow — their
		// coordinates are meaningless and previously polluted the vote.
		if (!status[i])
			continue;
		Point2f old_point = old_frame_points.at(i);
		Point2f new_point = new_frame_points.at(i);
		if (!(old_hand_boundary.contains(old_point)
				&& new_hand_boundary.contains(new_point)))
			continue;
		float dist = get_distance(old_point, new_point);
		if (dist < threshold) {
			directions.push_back(non_direction);
			non_count++;
		} else {
			float dx = new_point.x - old_point.x;
			float dy = new_point.y - old_point.y;
			// NOTE(review): the sub-threshold branches below bump non_count
			// without pushing non_direction into `directions`, unlike the
			// dist < threshold branch, and `directions` is never cleared
			// here — confirm both asymmetries are intended.
			if (abs(dx) > abs(dy)) {//horizontal
				if (abs(dx) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dx < 0) {
						directions.push_back(left_direction);
						left_count++;
					} else {
						directions.push_back(right_direction);
						right_count++;
					}
				}
			} else { //vertical
				if (abs(dy) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dy < 0) {
						directions.push_back(up_direction);
						up_count++;
					} else {
						directions.push_back(down_direction);
						down_count++;
					}
				}
			}
		}
	}

	// Majority vote; ties resolve in the order up, down, left, right, none.
	int dirs_counts[] = { up_count, down_count, left_count, right_count,
			non_count };
	int max_elem = *max_element(dirs_counts, dirs_counts + 5);
	final_direction = "";
	if (up_count == max_elem)
		final_direction = "up";
	else if (down_count == max_elem)
		final_direction = "down";
	else if (left_count == max_elem)
		final_direction = "left";
	else if (right_count == max_elem)
		final_direction = "right";
	else
		final_direction = "none";

	return final_direction;
}
示例#26
0
// One step of sparse Lucas-Kanade point tracking (adapted from OpenCV's
// lkdemo sample). When needToInit is set, features are (re)detected on
// the current frame; otherwise points[0] are tracked from prevGray into
// the current frame, survivors are drawn on `image`, and the point set /
// grayscale frame are swapped for the next call.
//
// NOTE(review): the `img` parameter is unused — the member `image` is
// processed instead; confirm callers expect that.
void QOpenCvImageBox::lucasKanadeOF( IplImage* img ) {
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);

    Size winSize(10,10);

    cvtColor(image, gray, CV_BGR2GRAY);

    if( nightMode )
        image = Scalar::all(0);

    if( needToInit )
    {
        // automatic initialization
        goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
        cornerSubPix(gray, points[1], winSize, Size(-1,-1), termcrit);
    }
    else if( !points[0].empty() )
    {
        vector<uchar> status;
        vector<float> err;
        if(prevGray.empty())
            gray.copyTo(prevGray);
        calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0);
        // Compact points[1] in place, keeping only successfully tracked
        // points and drawing each survivor.
        // BUG FIX: removed the add/remove-point machinery inherited from
        // the lkdemo sample — `addRemovePt` was a local that could never
        // become true, and `pt` was read while uninitialized inside that
        // dead branch.
        size_t i, k;
        for( i = k = 0; i < points[1].size(); i++ )
        {
            if( !status[i] )
                continue;

            points[1][k++] = points[1][i];
            circle( image, points[1][i], 3, Scalar(0,255,0), -1, 8);
        }
        points[1].resize(k);
    }

    needToInit = false;

    // Current points/frame become the "previous" state for the next call.
    std::swap(points[1], points[0]);
    swap(prevGray, gray);
}
// Sparse pyramidal Lucas-Kanade optical flow: propagates points0 from
// frame0 into frame1 using the window size and pyramid depth configured
// on this estimator. Per-point tracking success and error measures are
// written to `status` and `errors`.
void SparsePyrLkOptFlowEstimator::run(
        InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
        OutputArray status, OutputArray errors)
{
    calcOpticalFlowPyrLK(frame0, frame1, points0, points1,
                         status, errors,
                         winSize_,    // LK search window
                         maxLevel_);  // number of pyramid levels
}
示例#28
0
// Thought: simply use the bounding box of the feature points as the head.
// Tracks prevPoints from the previous ROI into the current frame's ROI,
// keeps only successfully tracked points, and updates boundBox /
// patchOfInterest from the surviving points' bounding rectangle.
// All drawing (points, rectangles) goes onto the ROI of depthImage.
//
// NOTE(review): depthImage is converted with COLOR_BGR2GRAY, so it is
// assumed to be a 3-channel rendering of depth — confirm upstream.
void HeartFeatureTracker::track(Mat &colorImage, Mat &depthImage) {

    Mat gray;
    vector<uchar> status;
    vector<float> err;
    vector<Point2f> points;

    cvtColor(depthImage, gray, COLOR_BGR2GRAY);

    Mat roiColor = depthImage(bbox);

    Mat roi = gray(bbox);

    if(prevPoints.empty()) {
        return;   // nothing to track yet
    }

    calcOpticalFlowPyrLK(prevGray, roi, prevPoints, points, status, err, Size(10,10),
                         3, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), 0, 0.001);

    // Result currently unused — the applyTransformToPoints calls are
    // disabled; kept so the transform-based update can be re-enabled.
    Mat transform = estimateRigidTransform(prevPoints, points, false);

    //applyTransformToPoints(boundBox, transform);
    //applyTransformToPoints(patchOfInterest, transform);

    // Compact `points` in place, keeping only successfully tracked
    // features and drawing each survivor. (BUG FIX: removed the unused
    // `brokeOut` local from the original.)
    size_t i, k;
    for( i = k = 0; i < points.size(); i++ ) {
        if( !status[i] ) {
            continue;
        }

        points[k++] = points[i];
        circle( roiColor, points[i], 3, Scalar(0,255,0), -1, 8);
    }

    points.resize(k);

    // BUG FIX: boundingRect/minAreaRect assert on an empty point set. If
    // every feature was lost, keep prevPoints so tracking can recover on
    // the next frame instead of crashing here.
    if (points.empty()) {
        prevGray = roi;
        return;
    }

    Rect dumbRect = boundingRect(points);
    convertRectToMats(boundBox, dumbRect);
    rectangle(roiColor, dumbRect, Scalar(0,255,0));

    convertRectToMats(patchOfInterest, getForeheadFromBbox(dumbRect));

    RotatedRect dumbRotated = minAreaRect(points);
    Point2f rect_points[4];
    dumbRotated.points(rect_points);
    Scalar color = Scalar( 255, 0, 0 );

    for( int j = 0; j < 4; j++ )
        line( roiColor, rect_points[j], rect_points[(j+1)%4], color, 1, 8 );

    DrawBoxFromPoints(boundBox, roiColor);
    DrawBoxFromPoints(patchOfInterest, roiColor);

    // Current ROI/points become the reference for the next call.
    prevGray = roi;
    prevPoints = points;
}
// Registers a burst of (uchar, grayscale) images against the first image
// of the list and fuses them into one super-resolved frame.
// For every other image a homography is estimated from sparse LK feature
// tracking; images whose estimated translation exceeds ~2px are dropped,
// the survivors are upscaled by resizeFactor and warped onto the
// reference, then combined per-pixel with a median. Holes (pixels with
// no contributing image) are filled from a median-blurred copy.
// Returns {fused image, per-pixel weight map (sqrt of contributor count)}.
std::pair<Mat, Mat> 
ImageRegistrator::registerImages(ImageList inputImages, int resizeFactor, int cornersAmount)
{
    Scaller                     scaller;
    MatrixList                  homographies;
    ImageList::const_iterator   selectedImage = inputImages.begin();  // reference image
    ImageList::iterator         nthImage = inputImages.begin();
    PointVector                 selectedImageCorners(cornersAmount);
    PointVector                 nthImageCorners(cornersAmount);
    std::vector<uchar>          status(cornersAmount);
    std::vector<float>          error(cornersAmount);

    // Detect and sub-pixel-refine features on the reference image.
    goodFeaturesToTrack(
            *selectedImage,
            selectedImageCorners,
            cornersAmount,
            0.01,
            1);

    cv::cornerSubPix(
            *selectedImage,
            selectedImageCorners,
            Size(5, 5),
            Size(-1, -1),
            TermCriteria(
                TermCriteria::COUNT + TermCriteria::EPS,
                6000,
                0.001)
            );


    // Estimate a homography from the reference to every other image.
    for (; nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            calcOpticalFlowPyrLK(
                    *selectedImage,
                    *nthImage,
                    selectedImageCorners,
                    nthImageCorners,
                    status,
                    error);

            // Keep only correspondences that LK tracked successfully.
            PointVector selImgCor = removeBadPoints(selectedImageCorners, status);
            PointVector nthImgCor = removeBadPoints(nthImageCorners, status);

            Mat H = findHomography(
                    selImgCor,
                    nthImgCor,
                    CV_RANSAC,
                    0.1);

            // Reject images that moved too far (translation > ~2px) —
            // releasing marks them for removal by ImageRemPred below.
            if (cv::norm(Point2f(H.at<double>(0,2), H.at<double>(1,2))) > 2) {
                nthImage->release();
                continue;
            }

            // Scale the homography's coefficients to the upscaled grid.
            roundMatrixCoefficients(H, resizeFactor);
            homographies.push_back(H);
        }
    }  
    
    // Drop the released (rejected) images.
    // NOTE(review): erase() below, and especially the reassignment of
    // inputImages by upscaleImages(), invalidate `selectedImage`; the
    // `nthImage != selectedImage` comparisons after this point compare
    // against a dangling iterator — verify and fix (e.g. track an index).
    inputImages.erase(std::remove_if(inputImages.begin(),
                inputImages.end(),
                ImageRegistrator::ImageRemPred()),
            inputImages.end());
        
    inputImages = scaller.upscaleImages(inputImages, resizeFactor); 
    MatrixList::iterator h = homographies.begin();

    // Warp every non-reference image onto the reference frame.
    for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
        if (nthImage != selectedImage) {
            util::printMatrix(*h, 12);
            warpPerspective(
                    nthImage->clone(),
                    *nthImage,
                    *h,
                    nthImage->size(),
                    cv::INTER_NEAREST | cv::WARP_INVERSE_MAP);
            ++h;
        }
    }

    Mat                 output(selectedImage->size(), selectedImage->type());
    std::list<uchar>    pixelValues;
    Mat                 medianWeights(output.size(), output.type());
    
    // Per-pixel median fusion across all registered images; zero pixels
    // (unmapped after warping) do not contribute.
    for (int i = 0; i < selectedImage->rows ; ++i) {
        for (int j = 0; j < selectedImage->cols; ++j) {

            for (nthImage = inputImages.begin(); nthImage != inputImages.end(); ++nthImage) {
                uchar value = (*nthImage).at<uchar>(i,j);

                if (value != 0)  {
                    pixelValues.push_back(value);
                }
            }

            if ( !pixelValues.empty() ) {
                output.at<uchar>(i,j) = produceMedian(pixelValues);
                // Weight grows with the number of contributing images.
                medianWeights.at<uchar>(i,j) = 
                    static_cast<uchar>(std::sqrt(pixelValues.size()));
            }

            pixelValues.clear();
        }
    }

    std::cout << "pixel covreage : " << pixelCovreage(output) << std::endl;

    // Fill remaining holes from a median-blurred copy of the result.
    // NOTE(review): a kernel size of 1 makes medianBlur an identity copy;
    // was a larger kernel (e.g. 3) intended?
    Mat fullMedian(output.size(), output.type());
    cv::medianBlur(output, fullMedian, 1);

    for (int i = 0; i < output.rows ; ++i) {
        for (int j = 0; j < output.cols; ++j) {
            if (output.at<uchar>(i,j) == 0) {
                output.at<uchar>(i,j) = fullMedian.at<uchar>(i,j);
            }
        }
    }
    util::printImage(output, std::string("tada"));

    return std::pair<Mat, Mat>(output, medianWeights);
}
// Forward-Backward Lucas-Kanade tracking between img1 and img2.
// points1 are tracked forward into points2, then points2 are tracked
// backward to pointsFB; the FB error per point (distance between the
// original point and its back-tracked position) is stored in the
// FB_error member. Points are finally scored by normalized cross
// correlation and filtered (filterPts).
// Compiles either a CPU path (calcOpticalFlowPyrLK) or a CUDA path
// (sparse pyramidal LK on the GPU), selected by USE_CUDA.
// Returns the result of filterPts — whether enough valid points remain.
bool LKTracker::trackf2f(const Mat& img1, const Mat& img2, vector<Point2f> &points1, vector<cv::Point2f> &points2)
{
	//TODO!:implement c function cvCalcOpticalFlowPyrLK() or Faster tracking function
#ifdef  MEASURE_TIME
	clock_t startTime = clock();
#endif
#ifndef USE_CUDA
	//Forward-Backward tracking
	calcOpticalFlowPyrLK(img1, img2, points1, points2, status, similarity, window_size, level, term_criteria, lambda, 0/**/);

	// TomHeaven's note: this filtering may miss some fast-moving targets,
	// so the (FB-mean-based) filtering step was removed.
	calcOpticalFlowPyrLK(img2, img1, points2, pointsFB, FB_status, FB_error, window_size, level, term_criteria, lambda, 0);
#else
	/////////// GPU acceleration
	/*Mat p1(1, points1.size(), CV_32FC2), p2(1, points2.size(), CV_32FC2);
	// copy data
	for (int i = 0; i < points1.size(); ++i) {
		Vec2f& t1 = p1.at<Vec2f>(0, i);
		t1[0] = points1[i].x;
		t1[1] = points1[i].y;	
	}

	for (int i = 0; i < points2.size(); ++i) {
		Vec2f& t2 = p2.at<Vec2f>(0, i);
		t2[0] = points2[i].x;
		t2[1] = points2[i].y;
	}
	gPoints1.upload(p1);
	gPoints2.upload(p2);*/
	// Upload images and points to the GPU.
	gImg1.upload(img1);
	gImg2.upload(img2);
	upload(points1, gPoints1);
	upload(points2, gPoints2);


	// Compute forward and backward sparse optical flow on the GPU.
	gFlow->sparse(gImg1, gImg2, gPoints1, gPoints2, gStatus); // compute gPoints2
	gFlow->sparse(gImg2, gImg1, gPoints2, gPointsFB, gFBStatus); //compute gPointsFB
	
	download(gPoints2, points2);
	download(gStatus, status);
	download(gPointsFB, pointsFB);
#endif

#ifdef  MEASURE_TIME
	clock_t endTime = clock();
	printf("OF time = %.3f\n", double(endTime - startTime) / CLOCKS_PER_SEC);
#endif
	
	//printf("p1: rows = %d, cols = %d, type = %d\n", p1.rows, p1.cols, p1.type());
	//printf("gPoints1: rows = %d, cols = %d, type = %d\n", gPoints1.rows, gPoints1.cols, gPoints1.type());
	//printf("pointsFB: size = %d, point[0] = %f, %f\n", pointsFB.size(), pointsFB[0].x, pointsFB[0].y);
	
	//Compute the real FB-error
	FB_error.clear();
	for (int i = 0; i < points1.size(); ++i)
	{
		FB_error.push_back(norm(pointsFB[i] - points1[i]));
	}
	//Filter out points with FB_error[i] > mean(FB_error) && points with sim_error[i] > mean(sim_error)
	normCrossCorrelation(img1, img2, points1, points2);
	return filterPts(points1, points2);
	//return true;
}