Example #1
bool analizarFlujo(IplImage *img, IplImage *imgAnterior, CvRect *rect) {

            cvSetImageROI(img, *rect);
            IplImage *imgA1 = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
            cvCopy(img, imgA1);

            cvSetImageROI(imgAnterior, *rect);
            IplImage *imgB1 = cvCreateImage(cvGetSize(imgAnterior), imgAnterior->depth, imgAnterior->nChannels);
            cvCopy(imgAnterior, imgB1);

            cvResetImageROI(img);
            cvResetImageROI(imgAnterior);

            cvNamedWindow( "img", 1);
            cvNamedWindow( "imgA", 1);
            cvShowImage( "img", imgA1);
            cvShowImage( "imgA", imgB1);


            int py = imgA1->height;
            int px = imgA1->width;
            IplImage *imgA=cvCreateImage( cvSize(px,py),IPL_DEPTH_8U, 1);
            IplImage *imgB=cvCreateImage( cvSize(px,py),IPL_DEPTH_8U, 1);
            cvCvtColor( imgA1, imgA, CV_BGR2GRAY );
            cvCvtColor( imgB1, imgB, CV_BGR2GRAY );

            CvSize img_sz = cvGetSize( imgA );

            /////////////////////////////
            IplImage *eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
            IplImage *tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
            int corner_count = MAX_CORNERS;
            CvSize pyr_sz;
            int win_size = 5;


            cvGoodFeaturesToTrack(imgA, eig_image, tmp_image, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04);
            cvFindCornerSubPix(imgA, cornersA, corner_count, cvSize(win_size,win_size), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 1));

            // Workspace buffers for the pyramids; the C API documents
            // (width+8)*height/3 bytes as a sufficient size
            pyr_sz = cvSize( imgA->width+8, imgA->height/3 );
            pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
            pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );

            // Call the Lucas-Kanade algorithm
            cvCalcOpticalFlowPyrLK(imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, cvSize(win_size,win_size), 5, features_found, feature_errors, cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .03), 0);
            // Check whether any successfully tracked corner moved a plausible distance
            bool movimiento = false;
            for (int i = 0; i < corner_count && !movimiento; i++) {
                if (features_found[i] == 0 || feature_errors[i] > 100) { continue; }

                // Euclidean displacement of feature i between the two frames
                float d = sqrt((cornersA[i].x-cornersB[i].x)*(cornersA[i].x-cornersB[i].x)+(cornersA[i].y-cornersB[i].y)*(cornersA[i].y-cornersB[i].y));
                if (d > 1 && d < 50) {
                    movimiento = true;
                }
            }

            // Release the temporaries before returning
            cvReleaseImage(&imgA1); cvReleaseImage(&imgB1);
            cvReleaseImage(&imgA);  cvReleaseImage(&imgB);
            cvReleaseImage(&eig_image); cvReleaseImage(&tmp_image);
            cvReleaseImage(&pyrA); cvReleaseImage(&pyrB);
            return movimiento;
    }
Example #2
void mexFunction(int plhs_size, mxArray *plhs[], int prhs_size, const mxArray *prhs[])
{
    // Load images

    if (prhs_size ==4) {
        win_size = *mxGetPr(prhs[3]);
    }

    int N = mxGetN(prhs[0]);
    int M = mxGetM(prhs[0]);
    grey0 = cvCreateImage( cvSize(N, M), 8, 1 );
    grey1 = cvCreateImage( cvSize(N, M), 8, 1 );
    loadImageFromMatlab(prhs[0],grey0);
    loadImageFromMatlab(prhs[1],grey1);

    // Load feature points
    double *fp = mxGetPr(prhs[2]);

    int num_pts = mxGetN(prhs[2]);
    points[0] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
    points[1] = (CvPoint2D32f*)cvAlloc(num_pts*sizeof(points[0][0]));
    char *status = (char*)cvAlloc(num_pts);
    float *error = (float*) cvAlloc(num_pts*sizeof(float));
    for (int i = 0; i < num_pts; i++) {
        points[0][i].x = fp[2*i];
        points[0][i].y = fp[2*i+1];
    }
    // not needed; skipping this refinement raises throughput from ~40 fps to ~200 fps
    //cvFindCornerSubPix( grey0, points[0], num_pts, cvSize(win_size,win_size), cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));

    pyramid1 = cvCreateImage( cvGetSize(grey1), 8, 1 );
    pyramid0 = cvCreateImage( cvGetSize(grey1), 8, 1 );

    cvCalcOpticalFlowPyrLK( grey0, grey1, pyramid0, pyramid1, points[0], points[1], num_pts, cvSize(win_size,win_size), 6, status, error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0 );

    // Output

    plhs[0] = mxCreateDoubleMatrix(6, num_pts, mxREAL);
    double *output = mxGetPr(plhs[0]);
    for (int i = 0; i < num_pts; i++) {
        output[6*i]   = (double) points[0][i].x;
        output[6*i+1] = (double) points[0][i].y;
        output[6*i+2] = (double) points[1][i].x;
        output[6*i+3] = (double) points[1][i].y;
        output[6*i+4] = (double) error[i];
        output[6*i+5] = (double) status[i];

    }

    // Tidy up
    cvFree( &points[0] );
    cvFree( &points[1] );
    cvFree( &status );
    cvFree( &error );
    cvReleaseImage( &pyramid0 );
    cvReleaseImage( &pyramid1 );
    cvReleaseImage( &grey0 );
    cvReleaseImage( &grey1 );
    return;
}
Example #3
int opticalflow( char * im1fname, char * im2fname, CvPoint2D32f * &source_points, CvPoint2D32f * &dest_points, char * &status )
{
	int count = MAX_COUNT;
	double quality = 0.15;
	// double min_distance = 2;
	double min_distance = 3;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	
	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), 32, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), 32, 1);

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	cvReleaseImage( &image1 );
	cvReleaseImage( &image2 );

	cvReleaseImage( &pyramid );
	cvReleaseImage( &second_pyramid );

	return count;
}
Example #4
/* track a number of KLT features with an n-stage pyramid
*/
void OpticalFlow::LKPyramids(IplImage* prevImage, IplImage* currImage,
                             int prev_indx, int curr_indx)
{
  CvTermCriteria criteria =
    cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
  int flags = 0;
  if (m_num_features_tracked>0) {
    flags |= CV_LKFLOW_PYR_A_READY;
  }

  /* note: m_num_pyramid_levels can only be changed before playback
   * starts. To lift this restriction, the m_pyramids would have to be
   * re-initialized, i.e. pyr0_ready set appropriately.
  */

  // how many features were lost in the last frame (and possibly
  // during Get/SetFeatures)?
  ASSERT((int)m_feature_status.size()>=m_target_num_features);
  if (m_target_num_features>0) {
    cvCalcOpticalFlowPyrLK(prevImage, // frame A
                           currImage,   // frame B
                           m_pyramids[prev_indx], // buffer for pyramid for A
                           m_pyramids[curr_indx], // buffer for pyramid for B
                           // feature points to track in A
                           (CvPoint2D32f*) &m_features[prev_indx][0], 
                           // calculated positions in B
                           (CvPoint2D32f*) &m_features[curr_indx][0],
                           // number of feature points to track
                           m_target_num_features,
                           // search window size per pyramid level
                           cvSize(m_winsize_width, m_winsize_height),
                           // max number of pyramid levels
                           m_num_pyramid_levels,
                           // array pos will be set to 1 if corresponding
                           // feature point was found
                           &m_feature_status[0],
                           // may be NULL, diff btw old
                           // and new area around features
                           &m_errors[0],
                           criteria, // iteration termination criteria
                           flags  // todo: put estimate, see Documentation
      );

    int count = m_num_features_tracked = m_target_num_features;
    for (int cnt1=0, k=0; cnt1<count; cnt1++) {
      if (m_feature_status[cnt1] && m_errors[cnt1]<m_max_feature_error) {
        m_features[prev_indx][k] = m_features[prev_indx][cnt1];
        m_features[curr_indx][k] = m_features[curr_indx][cnt1];
        k++;
      } else {
        m_feature_status[cnt1] = 0;
        m_num_features_tracked --;
      }
    }
  }
  m_num_features_lost = m_target_num_features-m_num_features_tracked;
}
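The call above annotates every parameter of cvCalcOpticalFlowPyrLK in place. For quick reference, here is a minimal, self-contained sketch of the same C API with illustrative names (prev and curr are assumed to be same-size, 8-bit, single-channel frames; passing NULL pyramid buffers makes the function allocate its temporaries internally, as the OpenCV test in Example #18 below also does):

#include <opencv/cv.h>

enum { N_POINTS = 100 };

static void lk_minimal(IplImage* prev, IplImage* curr,
                       CvPoint2D32f prev_pts[N_POINTS], int n)
{
  CvPoint2D32f curr_pts[N_POINTS];
  char  found[N_POINTS];   // set to 1 where a feature was tracked into curr
  float error[N_POINTS];   // patch difference around each tracked feature

  cvCalcOpticalFlowPyrLK(prev, curr,
                         NULL, NULL,          // let OpenCV allocate the pyramid buffers
                         prev_pts, curr_pts, n,
                         cvSize(21, 21),      // search window per pyramid level
                         3,                   // maximum pyramid level
                         found, error,
                         cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
                         0);                  // no flags
}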
Example #5
// ######################################################################
Point2D<int> NeoBrain::trackObject(const Image<byte>& grey)
{
  itsRefreshSpeechFile.setVal(false);

  Point2D<int> targetLoc(-1,-1);

#ifdef HAVE_OPENCV
  if (itsAllowTracking.getVal() && itsTracking)
  {
    if (count > 0)
    {
      IplImage* tmp1 = img2ipl(prev_grey);
      IplImage* tmp2 = img2ipl(grey);

      cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
                             points[0], points[1], count,
                             cvSize(win_size,win_size), 3, status, 0,
                             cvTermCriteria(CV_TERMCRIT_ITER
                                            |CV_TERMCRIT_EPS,
                                            20,0.03), flags);

      cvReleaseImageHeader(&tmp1);
      cvReleaseImageHeader(&tmp2);

      flags |= CV_LKFLOW_PYR_A_READY;

      // compact the point list, keeping only successfully tracked points
      int k, i;
      for(i = k = 0; i<count; i++)
      {
        if (!status[i])
          continue;
        points[1][k++] = points[1][i];

        targetLoc.i = std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x));
        targetLoc.j = std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y));
        ASSERT(grey.coordsOk(targetLoc));
      }
      count = k;

    }

    IplImage *swap_temp;
    CV_SWAP( prev_pyramid, pyramid, swap_temp );
    CV_SWAP( points[0], points[1], swap_points );

    moveHeadToTarget();
  }
  prev_grey = grey;
#endif

  return targetLoc;
}
Example #6
// ######################################################################
std::vector<Point2D<int> > NeoBrain::getTrackersLoc(const Image<byte>& grey)
{
  std::vector<Point2D<int> > trackersLoc;

#ifdef HAVE_OPENCV
  if (itsAllowTracking.getVal() && itsTracking)
  {
    if (count > 0)
    {
      IplImage* tmp1 = img2ipl(prev_grey);
      IplImage* tmp2 = img2ipl(grey);

      cvCalcOpticalFlowPyrLK(tmp1, tmp2, prev_pyramid, pyramid,
                             points[0], points[1], count,
                             cvSize(win_size,win_size), 3, status, 0,
                             cvTermCriteria(CV_TERMCRIT_ITER
                                            |CV_TERMCRIT_EPS,
                                            20,0.03), flags);

      cvReleaseImageHeader(&tmp1);
      cvReleaseImageHeader(&tmp2);

      flags |= CV_LKFLOW_PYR_A_READY;

      // compact the point list, keeping only successfully tracked points
      int k, i;
      for(i = k = 0; i<count; i++)
      {
        if (!status[i])
          continue;
        points[1][k++] = points[1][i];

        Point2D<int> tracker(std::min(grey.getWidth()-1, std::max(0, (int)points[1][i].x)),
            std::min(grey.getHeight()-1, std::max(0, (int)points[1][i].y)));
        trackersLoc.push_back(tracker);
      }
      count = k;

    }

    IplImage *swap_temp;
    CV_SWAP( prev_pyramid, pyramid, swap_temp );
    CV_SWAP( points[0], points[1], swap_points );
  }
  prev_grey = grey;
#endif

  return trackersLoc;
}
Example #7
void ofxOpticalFlow::calc(ofxCvGrayscaleImage& prevFrame, ofxCvGrayscaleImage& currentFrame){
	int cornerCount = MAX_CORNERS;
	cvGoodFeaturesToTrack(prevFrame.getCvImage(), eigImg, tempImg, cornersPrev, &cornerCount, 0.01, 5.0, 0, 3, 0, 0.04);
	
	cvFindCornerSubPix(prevFrame.getCvImage(), cornersPrev, cornerCount, 
					   windowSize, cvSize(-1,-1), 
					   cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	
	cvCalcOpticalFlowPyrLK(prevFrame.getCvImage(), currentFrame.getCvImage(), pyrPrev, 
						   pyrCurr, cornersPrev, cornersCurr, cornerCount, windowSize, 
						   5, featuresFound, NULL,
						   cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03), 0);
	
	flowPoints.clear();
	for(int i=0; i<cornerCount; i++){
		if(featuresFound[i] == 0){
			continue;
		}
		flowPoints.push_back(ofxCvFlowPoint(cornersPrev[i].x, cornersPrev[i].y, cornersCurr[i].x, cornersCurr[i].y));
	}
}
Example #8
void HarrisBuffer::OpticalFlowFromLK()
{
  //cvCalcOpticalFlowLK(prevgray, gray, cvSize(15,15), OFx, OFy);
  //cvCalcOpticalFlowHS(prevgray, gray, 0, OFx, OFy, 0.1, cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,100,1e5));

  float subf=5;
  int xsz=gray->width, ysz=gray->height;
  int pxn=int(xsz/subf), pyn=int(ysz/subf);
  CvPoint2D32f *p1 = new CvPoint2D32f[pxn*pyn];
  CvPoint2D32f *p2 = new CvPoint2D32f[pxn*pyn];
  for (int i=0; i<pyn; i++)
    for (int j=0; j<pxn; j++){
      p1[i*pxn+j]=cvPoint2D32f(j*subf,i*subf);
      p2[i*pxn+j]=cvPoint2D32f(j*subf,i*subf);
    }

  char *sts = new char[pxn*pyn];
  CvTermCriteria termination = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 100, 1e5);

  // p2 was pre-filled with the grid positions, so CV_LKFLOW_INITIAL_GUESSES
  // uses it as the starting estimate; NULL pyramid buffers are allocated internally
  cvCalcOpticalFlowPyrLK(prevgray, gray, NULL, NULL,
    p1, p2, pxn*pyn, cvSize(10, 10),
    3, sts, NULL, termination, CV_LKFLOW_INITIAL_GUESSES);

  // write the subsampled flow components, then upscale to full resolution
  IplImage* OFxsub = cvCreateImage(cvSize(pxn,pyn), IMGTYPE, 1);
  IplImage* OFysub = cvCreateImage(cvSize(pxn,pyn), IMGTYPE, 1);
  IMG_ELEM_TYPE *ptrOFxsub = (IMG_ELEM_TYPE*)cvPtr2D(OFxsub,0,0);
  IMG_ELEM_TYPE *ptrOFysub = (IMG_ELEM_TYPE*)cvPtr2D(OFysub,0,0);
  for (int i=0; i<pyn; i++)
    for (int j=0; j<pxn; j++){
      ptrOFxsub[i*pxn+j] = p2[i*pxn+j].x - p1[i*pxn+j].x;
      ptrOFysub[i*pxn+j] = p2[i*pxn+j].y - p1[i*pxn+j].y;
    }
  cvResize(OFxsub, OFx, CV_INTER_NN);
  cvResize(OFysub, OFy, CV_INTER_NN);
  cvReleaseImage(&OFxsub);
  cvReleaseImage(&OFysub);

  delete [] p1;
  delete [] p2;
  delete [] sts;
}
Example #9
void KLTWrapper::RunTrack(IplImage * imgGray, IplImage * prevGray)
{
	int i, k;
	int nMatch[MAX_COUNT];

	if (prevGray == 0) {
		prevGray = imgPrevGray;
	} else {
		flags = 0;
	}

	memset(image->imageData, 0, image->imageSize);
	if (count > 0) {
		cvCalcOpticalFlowPyrLK(prevGray, imgGray, prev_pyramid, pyramid,
				       points[0], points[1], count, cvSize(win_size, win_size), 3, status, 0, cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03), flags);
		flags |= CV_LKFLOW_PYR_A_READY;
		for (i = k = 0; i < count; i++) {
			if (!status[i]) {
				continue;
			}

			nMatch[k++] = i;
		}
		count = k;
	}

	if (count >= 10) {
		// Make homography matrix with correspondences
		MakeHomoGraphy(nMatch, count);
	} else {
		for (int ii = 0; ii < 9; ++ii) {
			matH[ii] = ii % 3 == ii / 3 ? 1.0f : 0.0f;
		}
	}

	InitFeatures(imgGray);
}
Example #10
cvg_bool OpticalFlow::process(IplImage *frame, cvg_int *prevFrameNumFeatures, CvPoint2D32f **prevFrameFeatures, char **foundFeaturesInCurrentFrame, CvPoint2D32f **currentFrameFeatures) {
	// Convert the image to grayscale
	cvConvertImage(frame, currentFrame);

	cvg_bool res = !firstRun;
	if (!firstRun) {
		// Find displacements of the features
		CvSize opticalFlowWindow = cvSize(WINDOW_SIZE, WINDOW_SIZE);
		CvTermCriteria terminationCriteria = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3);
		cvCalcOpticalFlowPyrLK(	lastFrame, currentFrame, pyramid1, pyramid2,
								lastFrameFeatures, newFeatures, lastFrameFeaturesCount,
								opticalFlowWindow, PYRAMID_LEVELS, foundFeatures, errorFeatures,
								terminationCriteria, 0
								);

		if (prevFrameNumFeatures != NULL) (*prevFrameNumFeatures) = lastFrameFeaturesCount;
		if (prevFrameFeatures != NULL) {
			memcpy(lastFrameFeaturesCopy, lastFrameFeatures, lastFrameFeaturesCount * sizeof(CvPoint2D32f));
			(*prevFrameFeatures) = lastFrameFeaturesCopy;
		}
		if (foundFeaturesInCurrentFrame != NULL) (*foundFeaturesInCurrentFrame) = foundFeatures;
		if (currentFrameFeatures != NULL) (*currentFrameFeatures) = newFeatures;

		if (showDebug) outputDebugInfo(frame, lastFrameFeaturesCount, foundFeatures, lastFrameFeatures, newFeatures);

	} else firstRun = false;

	lastFrameFeaturesCount = maxNumFeatures;
	// Find good features to track in the current frame
	cvGoodFeaturesToTrack(currentFrame, eigImage, tempImage, lastFrameFeatures, &lastFrameFeaturesCount, MIN_FEATURE_QUALITY_TO_ACCEPT, 0.01, NULL);

	// Store current frame for the next iteration
	cvCopy(currentFrame, lastFrame);

	return res;
}
Example #11
void FeatureTracker::track_features(geometry_msgs::PoseStamped mapPose){
  //set the initial number of features to the max number we want to find
  int feature_count=num_features;
  printf("pose %f %f %f\n",mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
  int edge_pixels=5;
  
  //check if there were features from the last image to keep tracking
  if(last_feature_count>0){
    //if there were call cvCalcOpticalFlowPyrLK();
    //find matches between last good features and current image features
    //    store matches in featuresB
    cvCalcOpticalFlowPyrLK(last_image,image_rect,pyrA,pyrB,features,featuresB, last_feature_count,cvSize(win_size,win_size) ,4,last_features_status,track_error, cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,.3),0);
  }
  
  printf("got image flow\n");
  //    assign last_feature_id values for matched features and set the non matched spots to -1
  
  //find new features and subpixel them
  
  //TODO: add the image-flow matches as features before detecting new ones
  
  //find all good features
  cvGoodFeaturesToTrack(image_rect, eigImage, tempImage, features, &feature_count, quality_level, min_distance, NULL, block_size);
  
  //subpixel good features
  cvFindCornerSubPix(image_rect,features,feature_count,cvSize(win_size,win_size),cvSize(-1,-1),cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
  
  printf("subpixeled image\n");
  
  //for all the features in features B, find their matches in the newly found features
  //add all the matches to their correct featuremanager, for the non matching, make a new
  //feature manager and add them to it
  
  //for all features by now we need their ray and the robot pose at that location
  //draw dots on image where features are
  
  
  
  //set the feature ids to a control value
  
  for(int i=0;i<num_features;i++){
    current_feature_id[i]=-1;
  }
  
  for(int i=0;i<last_feature_count;i++){
    //for the previously found features in list b
    if(last_features_status[i]>0){
      for(int j=0;j<feature_count;j++){
        //for every feature found in this image,
        //determine if the two overlap in a meaningful way
        int xdiff=featuresB[i].x-features[j].x;
        int ydiff=featuresB[i].y-features[j].y;
        //if the pixels are within some margin of each other,
        //set the current id for j to the id of i
        if(sqrt(xdiff*xdiff + ydiff*ydiff)<pixel_tracking_margin){
          current_feature_id[j]=last_feature_id[i];
          printf("feature found %d %d\n",last_feature_id[i],i);
        }
      }
    }
  }
  
  printf("assigned IDs image\n");
  
  
  for(int i=0;i<feature_count;i++){
    printf("looping\n");
    if(current_feature_id[i]>=0){
      printf("prev feature match\n");
      //we matched a previous feature:
      //add our new observation to that feature's list
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);

      if(tempPoint.x > edge_pixels && tempPoint.x < last_image->width - edge_pixels &&
         tempPoint.y > edge_pixels && tempPoint.y < last_image->height - edge_pixels){
        featureList[current_feature_id[i]].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint, tempRay));
      }else{
        current_feature_id[i]=-1;
      }
    }else{
      printf("new feature\n");
      cv::Point3d tempRay;
      cv::Point2d tempPoint=cv::Point2d(features[i]);
      cam_model.projectPixelTo3dRay(tempPoint,tempRay);
      if(tempPoint.x > edge_pixels && tempPoint.x < last_image->width - edge_pixels &&
         tempPoint.y > edge_pixels && tempPoint.y < last_image->height - edge_pixels){
        printf("new good feature\n");
        //no previous match: create a new feature group in the list
        current_feature_id[i]=feature_number;
        //and add the new feature to it
        featureList.push_back(FeatureManager());
        featureList[feature_number].add(RawFeature(mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation), tempPoint, tempRay));
        ++feature_number;
      }
    }
  }
   
//  printf("features: ");
  for(int i=0;i<num_features;i++){
    if(i<feature_count){
     last_feature_id[i]=current_feature_id[i];
    }
    else{
      last_feature_id[i]=-1;
    }
 //   printf(" %d ",current_feature_id[i]);
  }
  printf("\n");
  
  
  last_feature_count=feature_count;
  
}
Example #12
// ######################################################################
Point2D<int> VisualTracker::trackObjects(const Image<byte>& grey)
{
  Point2D<int> targetLoc(-1,-1);

  if (!itsTracking)
    return targetLoc;

#ifdef HAVE_OPENCV
  if (itsCurrentNumPoints > 0)
  {
    IplImage* pGrey = img2ipl(itsPreviousGreyImg);
    IplImage* cGrey = img2ipl(grey);

    //flags = CV_LKFLOW_INITIAL_GUESSES;

    cvCalcOpticalFlowPyrLK(pGrey, cGrey, itsPreviousPyramid, itsCurrentPyramid,
        itsPreviousPoints, itsCurrentPoints,
        itsCurrentNumPoints, //number of feature points
        cvSize(itsTrackWindowSize.getVal(),itsTrackWindowSize.getVal()), //search window size in each pyramid
        3, // maximal pyramid level nummber
        itsTrackStatus,
        itsTrackError,
        cvTermCriteria(CV_TERMCRIT_ITER
          |CV_TERMCRIT_EPS,
          20,0.03), itsTrackFlags);

    itsTrackFlags = CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY;

    cvReleaseImageHeader(&pGrey);
    cvReleaseImageHeader(&cGrey);


    // compact the point list, keeping only successfully tracked points
    int k, i;
    for(i = k = 0; i<itsCurrentNumPoints; i++)
    {
      if (!itsTrackStatus[i])
        continue;

      itsCurrentPoints[k++] = itsCurrentPoints[i];
      //LINFO("Error %i: %f", i, itsTrackError[i]);
      if (itsTrackError[i] < 2000)
      {
        targetLoc.i = std::min(grey.getWidth()-1, std::max(0, (int)itsCurrentPoints[i].x));
        targetLoc.j = std::min(grey.getHeight()-1, std::max(0, (int)itsCurrentPoints[i].y));
        ASSERT(grey.coordsOk(targetLoc));
      }
    }
    itsCurrentNumPoints = k;

  }

  IplImage *swap_temp;
  CV_SWAP( itsPreviousPyramid, itsCurrentPyramid, swap_temp );
  CvPoint2D32f* swap_points;
  CV_SWAP( itsPreviousPoints, itsCurrentPoints, swap_points );

  itsPreviousGreyImg = grey;


  if (itsUseKalman && grey.coordsOk(targetLoc))
  {
    float Z[2];
    CvMat Zmat = cvMat(2,1,CV_32F, Z);

    Z[0] = targetLoc.i;
    Z[1] = targetLoc.j;

    cvKalmanCorrect(itsKalman, &Zmat);
    const CvMat* prediction = cvKalmanPredict(itsKalman, 0);

    //generate measurement
    cvMatMulAdd(itsKalman->measurement_matrix, itsKalman->state_pre, NULL, &Zmat);

    targetLoc.i = (int)prediction->data.fl[0];
    targetLoc.j = (int)prediction->data.fl[1];
  }

#endif

  return targetLoc;
}
Example #13
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Image of AR.Drone's camera
    IplImage *image = ardrone.getImage();

    // Variables for optical flow
    int corner_count = 50;
    IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
    IplImage *prev = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
    cvCvtColor(image, prev, CV_BGR2GRAY);
    IplImage *eig_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage *tmp_img = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
    IplImage *prev_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);
    IplImage *curr_pyramid = cvCreateImage(cvSize(image->width+8, image->height/3), IPL_DEPTH_8U, 1);
    CvPoint2D32f *corners1 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));
    CvPoint2D32f *corners2 = (CvPoint2D32f*)malloc(corner_count * sizeof(CvPoint2D32f));

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(33);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image
        image = ardrone.getImage();

        // Convert the camera image to grayscale
        cvCvtColor(image, gray, CV_BGR2GRAY);

        // Detect features (reset the count each frame)
        corner_count = 50;
        cvGoodFeaturesToTrack(prev, eig_img, tmp_img, corners1, &corner_count, 0.1, 5.0, NULL);

        // Corner detected
        if (corner_count > 0) {
            char *status = (char*)malloc(corner_count * sizeof(char));

            // Calculate optical flows
            CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.3);
            cvCalcOpticalFlowPyrLK(prev, gray, prev_pyramid, curr_pyramid, corners1, corners2, corner_count, cvSize(10, 10), 3, status, NULL, criteria, 0);

            // Draw the optical flows
            for (int i = 0; i < corner_count; i++) {
                cvCircle(image, cvPointFrom32f(corners1[i]), 1, CV_RGB (255, 0, 0));
                if (status[i]) cvLine(image, cvPointFrom32f(corners1[i]), cvPointFrom32f(corners2[i]), CV_RGB (0, 0, 255), 1, CV_AA, 0);
            }

            // Release the memory
            free(status);
        }

        // Save the last frame
        cvCopy(gray, prev);

        // Display the image
        cvShowImage("camera", image);
    }

    // Release the images
    cvReleaseImage(&gray);
    cvReleaseImage(&prev);
    cvReleaseImage(&eig_img);
    cvReleaseImage(&tmp_img);
    cvReleaseImage(&prev_pyramid);
    cvReleaseImage(&curr_pyramid);
    free(corners1);
    free(corners2);

    // See you
    ardrone.close();

    return 0;
}
Example #14
void VelocityDetector::LKFlow(Image* output)
{
    // Convert the current image to grey scale
    cvCvtColor(m_currentFrame->asIplImage(), m_currentGreyScale, CV_BGR2GRAY);
    
    
    // make it happen
    IplImage* last = m_lastGreyScale;
    IplImage* current = m_currentGreyScale;
        
    CvPoint2D32f frame1_features[m_lkMaxNumberFeatures];
    
    int number_of_features = m_lkMaxNumberFeatures;
    
    // Choosing the features to track (Shi-Tomasi)
    
    cvGoodFeaturesToTrack(last, m_eig_image, m_temp_image, frame1_features, 
                          &number_of_features, 
                          m_lkMinQualityFeatures, m_lkMinEucDistance, NULL);
    CvPoint2D32f frame2_features[m_lkMaxNumberFeatures];
    
    char optical_flow_found_feature[m_lkMaxNumberFeatures];
    float optical_flow_feature_error[m_lkMaxNumberFeatures];
    
    // Small window to avoid the "aperture problem"
    CvSize optical_flow_window = cvSize(3,3);
    
    // Terminate after m_lkIterations iterations or once the update is smaller than m_lkEpsilon
    CvTermCriteria optical_flow_termination_criteria
        = cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, m_lkIterations, m_lkEpsilon);
    
    // Run the pyramidal Lucas-Kanade optical flow algorithm on the selected features
    cvCalcOpticalFlowPyrLK(last, current, m_pyramid1, 
                           m_pyramid2, 
                           frame1_features, frame2_features, 
                           number_of_features, optical_flow_window, 5, 
                           optical_flow_found_feature, 
                           optical_flow_feature_error, 
                           optical_flow_termination_criteria, 0);

    // We are done; copy the current frame over to the last
    cvCopyImage(m_currentGreyScale, m_lastGreyScale);
    
    // needs to return m_velocity
    
    CvPoint totalP, totalQ;
    totalP.x = 0;
    totalP.y = 0;
    totalQ.x = 0;
    totalQ.y = 0;
    int goodCount = 0; // features that passed the sanity test below
    
    for(int i=0; i < number_of_features; i++)
    {
        // skip feature if not found
        if(optical_flow_found_feature[i] == 0)  continue;
        
        // plots each feature frame to frame
        CvPoint p, q;
        p.x = (int) frame1_features[i].x;
        p.y = (int) frame1_features[i].y;
        q.x = (int) frame2_features[i].x;
        q.y = (int) frame2_features[i].y;
        
        math::Vector2 flowVector(-(q.x - p.x), q.y - p.y);
        
        // Do test
        double lengthDifference = 
            fabs(flowVector.length() - m_velocity.length());
        bool good = false;
        if ((lengthDifference / m_velocity.length()) < m_lkLengthMaxError)
            good = true;
        if (m_velocity.length() < 0.0001)
            good = true;

        if (good)
        {
            totalP.x += p.x;
            totalP.y += p.y;
            totalQ.x += q.x;
            totalQ.y += q.y;
            goodCount++;
        }
        
        // we could draw the flow field here, but for now we average it
        
        // Draw velocity vector
        //if (output)
        //{
        //    CvPoint start;
        //    start.x = output->getWidth() / 2;
        //    start.y = output->getHeight() / 2;
        //    CvPoint end;
        //    end.x = start.x + ((int)(m_velocity.x*m_phaseLineScale));
        //    end.y = start.y - ((int)(m_velocity.y*m_phaseLineScale));
        //    cvLine(output->asIplImage(), start, end, CV_RGB(255,0,0), 1, CV_AA, 0);
        
        if (output)
        {
            int line_thickness = 1;
            CvScalar line_color = CV_RGB(0,0,255);
            if (!good)
                line_color = CV_RGB(0,255,0);
            double angle = atan2((double) p.y - q.y, (double) p.x - q.x);
            double hypotenuse = sqrt(square(p.y - q.y) + square(p.x - q.x));
            // Here we lengthen the arrow by a factor of m_lkFlowFieldScale.
            q.x = (int) (p.x - m_lkFlowFieldScale * hypotenuse * cos(angle));
            q.y = (int) (p.y - m_lkFlowFieldScale * hypotenuse * sin(angle));
            
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0);
            
            p.x = (int) (q.x + 5 * cos(angle + M_PI / 4));
            p.y = (int) (q.y + 5 * sin(angle + M_PI / 4));
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 5 * cos(angle - M_PI / 4));
            p.y = (int) (q.y + 5 * sin(angle - M_PI / 4));
            cvLine(output->asIplImage(), p, q, line_color, line_thickness, CV_AA, 0);
        }
    }
    
    CvPoint avgP, avgQ;
    avgP.x = 0;
    avgP.y = 0;
    avgQ.x = 0;
    avgQ.y = 0;
    double outImageX = 0;
    double outImageY = 0;
    
    // average only over the features that passed the test above
    if (goodCount != 0)
    {
        avgP.x = totalP.x/goodCount;
        avgP.y = totalP.y/goodCount;
        avgQ.x = totalQ.x/goodCount;
        avgQ.y = totalQ.y/goodCount;
        
        outImageX = avgQ.x - avgP.x;
        outImageY = avgQ.y - avgP.y;
    }
    
    // need to convert coordinates to place origin in center
    
    //double outX = 0;
    //double outY = 0;
    //Detector::imageToAICoordinates(m_lastFrame, outImageX, outImageY, outX, 
      //                             outY);
    
    // assign velocity
    m_velocity = math::Vector2(-outImageX, outImageY);
    
}
Example #15
/**
 * Tracks points from the first image to the second image.
 * initImgs() must be called before the first use and again at the end
 * of the program for cleanup. (A minimal calling sketch follows this
 * function.)
 *
 * @param imgI      previous (source) image; not modified.
 * @param imgJ      current (target) image; not modified.
 * @param ptsI      points to track in the first image,
 *                  laid out as [0] = x1, [1] = y1, [2] = x2, ...
 * @param nPtsI     number of points to track in the first image
 * @param ptsJ      output container for the calculated points in the
 *                  second image; must hold nPtsI points.
 * @param nPtsJ     number of points; must equal nPtsI
 * @param level     pyramid level, default 5
 * @param fb        forward-backward confidence values (the Euclidean
 *                  distance between a point and its backtracked position);
 *                  required length: nPtsI * sizeof(float).
 * @param ncc       normalized cross-correlation values;
 *                  required length: nPtsI * sizeof(float)
 * @param status    indicates positive tracks: 1 = tracked, 0 = lost;
 *                  required length: nPtsI * sizeof(char)
 *
 * Based on the Matlab function:
 * lk(2,imgI,imgJ,ptsI,ptsJ,Level) (Level is optional)
 */
int trackLK(IplImage *imgI, IplImage *imgJ, float ptsI[], int nPtsI,
    float ptsJ[], int nPtsJ, int level, float * fb, float*ncc, char*status)
{
  //TODO: watch NaN cases
  //double nan = std::numeric_limits<double>::quiet_NaN();
  //double inf = std::numeric_limits<double>::infinity();

  // tracking
  int I, J, winsize_ncc;
  CvSize pyr_sz;
  int i;
  // default to 5 pyramid levels if unspecified
  if (level == -1)
  {
    level = 5;
  }
  I = 0;
  J = 1;
  winsize_ncc = 10;

  //NOTE: initImgs() must be used correctly or a memory leak will follow.
  pyr_sz = cvSize(imgI->width + 8, imgI->height / 3);
  PYR[I] = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);
  PYR[J] = cvCreateImage(pyr_sz, IPL_DEPTH_32F, 1);

  // Points
  if (nPtsJ != nPtsI)
  {
    printf("Inconsistent input!\n");
    return 0;
  }

  points[0] = (CvPoint2D32f*) malloc(nPtsI * sizeof(CvPoint2D32f)); // template
  points[1] = (CvPoint2D32f*) malloc(nPtsI * sizeof(CvPoint2D32f)); // target
  points[2] = (CvPoint2D32f*) malloc(nPtsI * sizeof(CvPoint2D32f)); // forward-backward
  char* statusBacktrack = (char*) malloc(nPtsI); // freed below

  for (i = 0; i < nPtsI; i++)
  {
    points[0][i].x = ptsI[2 * i];
    points[0][i].y = ptsI[2 * i + 1];
    points[1][i].x = ptsJ[2 * i];
    points[1][i].y = ptsJ[2 * i + 1];
    points[2][i].x = ptsI[2 * i];
    points[2][i].y = ptsI[2 * i + 1];
  }

  // forward Lucas-Kanade track: I -> J
  cvCalcOpticalFlowPyrLK(imgI, imgJ, PYR[I], PYR[J], points[0], points[1],
      nPtsI, cvSize(win_size_lk, win_size_lk), level, status, 0, cvTermCriteria(
          CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
      CV_LKFLOW_INITIAL_GUESSES);

  // backward track J -> I for the forward-backward confidence measure
  cvCalcOpticalFlowPyrLK(imgJ, imgI, PYR[J], PYR[I], points[1], points[2],
      nPtsI, cvSize(win_size_lk, win_size_lk), level, statusBacktrack, 0, cvTermCriteria(
          CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
      CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY | CV_LKFLOW_PYR_B_READY);

  // a track counts as positive only if it succeeded in both directions
  for (i = 0; i < nPtsI; i++)
  {
    status[i] = (status[i] && statusBacktrack[i]) ? 1 : 0;
  }
  normCrossCorrelation(imgI, imgJ, points[0], points[1], nPtsI, status, ncc,
      winsize_ncc, CV_TM_CCOEFF_NORMED);
  euclideanDistance(points[0], points[2], fb, nPtsI);

  for (i = 0; i < nPtsI; i++)
  {
    if (status[i] == 1)
    {
      ptsJ[2 * i] = points[1][i].x;
      ptsJ[2 * i + 1] = points[1][i].y;
    }
    else //flow for the corresponding feature hasn't been found
    {
      // TODO: should we really write N_A_N here?
      ptsJ[2 * i] = N_A_N;
      ptsJ[2 * i + 1] = N_A_N;
      fb[i] = N_A_N;
      ncc[i] = N_A_N;
    }
  }
  for (i = 0; i < 3; i++)
  {
    free(points[i]);
    points[i] = 0;
  }
  free(statusBacktrack);
  return 1;
}
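A minimal calling sketch for trackLK, as referenced in the header comment above (buffer names are illustrative; it assumes initImgs() has been called and that the file-scope PYR, points and win_size_lk are set up as in the rest of this file):

  int n = 100;                                          /* points to track */
  float *ptsI = (float*) malloc(2 * n * sizeof(float)); /* x1,y1,x2,y2,... */
  float *ptsJ = (float*) malloc(2 * n * sizeof(float));
  float *fb   = (float*) malloc(n * sizeof(float));
  float *ncc  = (float*) malloc(n * sizeof(float));
  char  *st   = (char*)  malloc(n);
  /* fill ptsI with detected corners and copy it into ptsJ: the tracker
     passes CV_LKFLOW_INITIAL_GUESSES, so ptsJ seeds the search */
  if (trackLK(prevFrame, currFrame, ptsI, n, ptsJ, n, -1, fb, ncc, st)) {
      /* keep point i when st[i] == 1 and fb[i]/ncc[i] pass your thresholds;
         lost points come back as N_A_N */
  }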
Example #16
// Lucas-Kanade
Eigen::Matrix<double, 4, 150> lk2(IplImage* imgI, IplImage* imgJ, Eigen::Matrix<double, 2,
		150> const & pointsI, Eigen::Matrix<double, 2, 150> const & pointsJ,
		unsigned int sizeI, unsigned int sizeJ, unsigned int level) {

	double nan = std::numeric_limits<double>::quiet_NaN();

	int Level;
	if (level != 0) {
		Level = (int) level;
	} else {
		Level = 5;
	}

	int I = 0;
	int J = 1;
	int Winsize = 10;

	// Images: reuse the caller's images; allocate the pyramid
	// buffers only on the first call
	if (IMG[I] == 0) {
		CvSize imageSize = cvGetSize(imgI);
		PYR[I] = cvCreateImage(imageSize, 8, 1);
	}
	IMG[I] = imgI;

	if (IMG[J] == 0) {
		CvSize imageSize = cvGetSize(imgJ);
		PYR[J] = cvCreateImage(imageSize, 8, 1);
	}
	IMG[J] = imgJ;

	// Points
	int nPts = sizeI;

	if (nPts != (int) sizeJ) {
		std::cout << "Inconsistent input!" << std::endl;
		return Eigen::Matrix<double, 4, 150>::Zero();
	}

	points[0] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // template
	points[1] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // target
	points[2] = (CvPoint2D32f*) cvAlloc(nPts * sizeof(CvPoint2D32f)); // forward-backward

	for (int i = 0; i < nPts; i++) {
		points[0][i].x = pointsI(0, i);
		points[0][i].y = pointsI(1, i);
		points[1][i].x = pointsJ(0, i);
		points[1][i].y = pointsJ(1, i);
		points[2][i].x = pointsI(0, i);
		points[2][i].y = pointsI(1, i);
	}

	float *ncc = (float*) cvAlloc(nPts * sizeof(float));
	float *fb = (float*) cvAlloc(nPts * sizeof(float));
	char *status = (char*) cvAlloc(nPts);

	cvCalcOpticalFlowPyrLK(IMG[I], IMG[J], PYR[I], PYR[J], points[0],
			points[1], nPts, cvSize(win_size, win_size), Level, status, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES);
	cvCalcOpticalFlowPyrLK(IMG[J], IMG[I], PYR[J], PYR[I], points[1],
			points[2], nPts, cvSize(win_size, win_size), Level, 0, 0,
			cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03),
			CV_LKFLOW_INITIAL_GUESSES | CV_LKFLOW_PYR_A_READY
					| CV_LKFLOW_PYR_B_READY );

	normCrossCorrelation(IMG[I], IMG[J], points[0], points[1], nPts, status,
			ncc, Winsize, CV_TM_CCOEFF_NORMED);
	euclideanDistance(points[0], points[2], fb, nPts);

	// Output (columns beyond nPts stay NaN)
	int M = 4;
	Eigen::MatrixXd output = Eigen::MatrixXd::Constant(M, 150, nan);
	for (int i = 0; i < nPts; i++) {
		if (status[i] == 1) {
			output(0, i) = (double) points[1][i].x;
			output(1, i) = (double) points[1][i].y;
			output(2, i) = (double) fb[i];
			output(3, i) = (double) ncc[i];
		} else {
			output(0, i) = nan;
			output(1, i) = nan;
			output(2, i) = nan;
			output(3, i) = nan;
		}
	}

	cvFree(&points[0]);
	cvFree(&points[1]);
	cvFree(&points[2]);
	cvFree(&ncc);
	cvFree(&fb);
	cvFree(&status);

	return output;
}
Example #17
int FindAndTrackAllPointsOnRegistersOpenCV(unsigned int reg_new , unsigned int reg_old , unsigned int timeout)
{

    if  ( ( video_register[reg_new].pixels == 0 ) || ( video_register[reg_old].pixels == 0 ) ) { return 0; }

    // Load two images and allocate other structures
    struct VideoRegister * MONOCHROME_TMP_REGISTER_OLD = GetTempRegister();
    if (MONOCHROME_TMP_REGISTER_OLD == 0 ) { fprintf(stderr," Error Getting the first temporary Video Register ( TrackAllPointsOnRegistersOpenCV ) \n"); return 0; }

    struct VideoRegister * MONOCHROME_TMP_REGISTER_NEW = GetTempRegister();
    if (MONOCHROME_TMP_REGISTER_NEW == 0 ) { fprintf(stderr," Error Getting the second temporary Video Register ( TrackAllPointsOnRegistersOpenCV ) \n"); return 0; }

    CopyRegister(&video_register[reg_new],MONOCHROME_TMP_REGISTER_NEW,0,0);
    ConvertRegisterFrom3ByteTo1Byte(MONOCHROME_TMP_REGISTER_NEW);

    CopyRegister(&video_register[reg_old],MONOCHROME_TMP_REGISTER_OLD,0,0);
    ConvertRegisterFrom3ByteTo1Byte(MONOCHROME_TMP_REGISTER_OLD);

    image_1->imageData=(char*) MONOCHROME_TMP_REGISTER_OLD->pixels; // UGLY HACK
    image_2->imageData=(char*) MONOCHROME_TMP_REGISTER_NEW->pixels; // UGLY HACK



	int win_size = 15;


	// Get the features for tracking

  StartTimer(FIND_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | START


	int corner_count = MAX_CORNERS;

	cvGoodFeaturesToTrack( image_1, eig_image, tmp_image, cornersA, &corner_count, 0.05, 5.0, 0, 3, 0, 0.04 );

	cvFindCornerSubPix( image_1, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );

  EndTimer(FIND_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | END



	// Call the Lucas-Kanade algorithm

 StartTimer(TRACK_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | START
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];

	CvSize pyr_sz = cvSize( image_1->width+8, image_1->height/3 );

	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );


	cvCalcOpticalFlowPyrLK( image_1, image_2, pyrA, pyrB, cornersA, cornersB, corner_count,
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );


 EndTimer(TRACK_CORNERS_DELAY); // STATISTICS KEEPER FOR HYPERVISOR | END



   ClearFeatureList(video_register[reg_new].features);
   video_register[reg_new].features->last_track_time = video_register[reg_new].time; // after this procedure, the feature list is up to date

   int i=0;
   for ( i=0; i <corner_count; i++ )
    {

         AddToFeatureList(  video_register[reg_new].features  ,
                            cornersB[i].x , cornersB[i].y , 1 ,0,0,0);

         video_register[reg_new].features->list[i].last_x = cornersA[i].x;
         video_register[reg_new].features->list[i].last_y = cornersA[i].y;
    }


   unsigned int filtered_out = RemoveTrackPointsIfMovementMoreThan(video_register[reg_new].features,settings[FEATURE_TRACKING_MAX_MOVEMENT_THRESHOLD]);
   if ( filtered_out > 0 )
     {
       // fprintf(stderr,"Filtered %u points due to movement\n", filtered_out  );
     }
   unsigned int outside_zone = 8;
   filtered_out = Remove2DTrackPointsIfOutOfBounds(video_register[reg_new].features,outside_zone,outside_zone,metrics[RESOLUTION_X]-outside_zone,metrics[RESOLUTION_Y]-outside_zone);
   if ( filtered_out > 0 )
     {
       // fprintf(stderr,"Filtered %u points due as out of bounds \n", filtered_out  );
     }

   cvReleaseImage(&pyrA);
   cvReleaseImage(&pyrB);


   StopUsingVideoRegister(MONOCHROME_TMP_REGISTER_NEW);
   StopUsingVideoRegister(MONOCHROME_TMP_REGISTER_OLD);


  return corner_count;
}
Example #18
void CV_OptFlowPyrLKTest::run( int )
{
    int code = cvtest::TS::OK;

    const double success_error_level = 0.3;
    const int bad_points_max = 8;

    /* test parameters */
    double  max_err = 0., sum_err = 0;
    int     pt_cmpd = 0;
    int     pt_exceed = 0;
    int     merr_i = 0, merr_j = 0, merr_k = 0;
    char    filename[1000];

    CvPoint2D32f *u = 0, *v = 0, *v2 = 0;
    CvMat *_u = 0, *_v = 0, *_v2 = 0;
    char* status = 0;

    IplImage* imgI = 0;
    IplImage* imgJ = 0;

    int  n = 0, i = 0;

    sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "lk_prev.dat" );
    _u = (CvMat*)cvLoad( filename );

    if( !_u )
    {
        ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "lk_next.dat" );
    _v = (CvMat*)cvLoad( filename );

    if( !_v )
    {
        ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    if( _u->cols != 2 || CV_MAT_TYPE(_u->type) != CV_32F ||
        _v->cols != 2 || CV_MAT_TYPE(_v->type) != CV_32F || _v->rows != _u->rows )
    {
        ts->printf( cvtest::TS::LOG, "the loaded matrices of points are not valid\n" );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;

    }

    u = (CvPoint2D32f*)_u->data.fl;
    v = (CvPoint2D32f*)_v->data.fl;

    /* allocate additional buffers */
    _v2 = cvCloneMat( _u );
    v2 = (CvPoint2D32f*)_v2->data.fl;

    /* read first image */
    sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" );
    imgI = cvLoadImage( filename, -1 );

    if( !imgI )
    {
        ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    /* read second image */
    sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" );
    imgJ = cvLoadImage( filename, -1 );

    if( !imgJ )
    {
        ts->printf( cvtest::TS::LOG, "could not read %s\n", filename );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    n = _u->rows;
    status = (char*)cvAlloc(n*sizeof(status[0]));

    /* calculate flow */
    cvCalcOpticalFlowPyrLK( imgI, imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ),
                            4, status, 0, cvTermCriteria( CV_TERMCRIT_ITER|
                            CV_TERMCRIT_EPS, 30, 0.01f ), 0 );

    /* compare results */
    for( i = 0; i < n; i++ )
    {
        if( status[i] != 0 )
        {
            double err;
            if( cvIsNaN(v[i].x) )
            {
                merr_j++;
                continue;
            }

            err = fabs(v2[i].x - v[i].x) + fabs(v2[i].y - v[i].y);
            if( err > max_err )
            {
                max_err = err;
                merr_i = i;
            }

            pt_exceed += err > success_error_level;
            sum_err += err;
            pt_cmpd++;
        }
        else
        {
            if( !cvIsNaN( v[i].x ))
            {
                merr_i = i;
                merr_k++;
                ts->printf( cvtest::TS::LOG, "The algorithm lost the point #%d\n", i );
                code = cvtest::TS::FAIL_BAD_ACCURACY;
                goto _exit_;
            }
        }
    }

    if( pt_exceed > bad_points_max )
    {
        ts->printf( cvtest::TS::LOG,
                   "The number of poorly tracked points is too big (>=%d)\n", pt_exceed );
        code = cvtest::TS::FAIL_BAD_ACCURACY;
        goto _exit_;
    }

    if( max_err > 1 )
    {
        ts->printf( cvtest::TS::LOG, "Maximum tracking error is too big (=%g) at %d\n", max_err, merr_i );
        code = cvtest::TS::FAIL_BAD_ACCURACY;
        goto _exit_;
    }

_exit_:

    cvFree( &status );
    cvReleaseMat( &_u );
    cvReleaseMat( &_v );
    cvReleaseMat( &_v2 );

    cvReleaseImage( &imgI );
    cvReleaseImage( &imgJ );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Example #19
int lk_work(CAMOBJ * st)
{
        int i, k;
		float mx,my,cx,cy;

    //    frame = cvQueryFrame( capture );
        if( !frame )
            return(1);

        if( !image ) 	            // allocate all the buffers 
		{
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_grey = cvCreateImage( cvGetSize(frame), 8, 1 );
            save_grey = cvCreateImage( cvGetSize(frame), 8, 1 );

            pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            prev_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );
            save_pyramid = cvCreateImage( cvGetSize(frame), 8, 1 );

            points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
            save_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));

            status = (char*)cvAlloc(MAX_COUNT);
			for (i=0;i<MAX_COUNT;i++) pt_mode[i]=0;
            flags = 0;
			statuscount++;
		}
        cvCopy( frame, image, 0 );

		if (st->mode==1)
		{
		  if (!video_writer)
		       video_writer = cvCreateVideoWriter(st->videofilename,-1,15,cvGetSize(image));
		  	
		  cvWriteFrame(video_writer,image);
  		}


        if (st->enable_tracking)
		{
		cvCvtColor( image, grey, CV_BGR2GRAY );

        if( night_mode )
            cvZero( image );

		if (need_to_init)
		{
		  need_to_init=0;
		  init_flag=0;

		  if (st->trackface)
		  {
			if (detect_face())
			{
				int x;

				count=2;

				cvFindCornerSubPix( grey, points[1], count,
					cvSize(win_size,win_size), cvSize(-1,-1),
					cvTermCriteria(CV_TERMCRIT_ITER,1,1.0));
//					cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
  
	            cvCopy(grey,save_grey,0 );
		        cvCopy(pyramid,save_pyramid,0 );
				cvCopy(grey,prev_grey,0 );
		        cvCopy(pyramid,prev_pyramid,0 );

			    for (x=0;x<count;x++)
				{
					save_points[x].x=points[1][x].x;
					save_points[x].y=points[1][x].y;
					points[0][x].x=points[1][x].x;
					points[0][x].y=points[1][x].y;
					save_pt_mode[x]=pt_mode[x];
				}
				calc_distances(1);
				save_count=count;
				add_remove_pt = 0;
	            flags = 0;
				time_to_restore=0;
				
			} 
		  }
		  else
		  {
			    save_points[0].x=PT1_xpos*100;
				save_points[0].y=PT1_ypos*100;
				points[0][0].x=PT1_xpos*100;
				points[0][0].y=PT1_ypos*100;
				save_pt_mode[0]=0;
				count=1;MAX_COUNT=1;
				calc_distances(1);

  				cvFindCornerSubPix( grey, points[1], 1,
					cvSize(win_size,win_size), cvSize(-1,-1),
					cvTermCriteria(CV_TERMCRIT_ITER,1,1.0));
	            
				// report("hallo");
				cvCopy(grey,save_grey,0 );
		        cvCopy(pyramid,save_pyramid,0 );
				cvCopy(grey,prev_grey,0 );
		        cvCopy(pyramid,prev_pyramid,0 );
				
				save_count=1;
				add_remove_pt = 0;
	            flags = 0;
				//time_to_restore=0;
				
		  }

		}        

		if(count < MAX_COUNT) need_to_init=1;
		else
        {
			
            cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
                points[0], points[1], count, cvSize(win_size,win_size), 5, status, 0,
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );
            flags |= CV_LKFLOW_PYR_A_READY;

			mx=0;my=0;
			cx=0;cy=0;mcount=0;ccount=0;
            for( i = k = 0; i < count; i++ )
            {

                if( add_remove_pt )
                {
                    double dx = pt.x - points[1][i].x;
                    double dy = pt.y - points[1][i].y;

                    if( dx*dx + dy*dy <= 25 )
                    {
                        add_remove_pt = 0;
                        if (pt_mode[i]==1) {pt_mode[i]=0; continue;}
						pt_mode[i]=1;
                    }
                }
                
                if( !status[i] ) { need_to_init=1; status[i]=true; }
                    

				if (pt_mode[i]==1)
				{
					cx+= (points[0][i].x - points[1][i].x);
					cy+= (points[0][i].y - points[1][i].y);
					ccount++;
				}
				else
				{
					mx += (points[0][i].x - points[1][i].x);
					my += (points[0][i].y - points[1][i].y);
					mcount++;
				}
				
				points[1][k] = points[1][i];
				pt_mode[k++]=pt_mode[i];
				if (need_to_init)
				  cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(255,0,0), 2, 8,0);
				else if (pt_mode[i]==1)
                  cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(255,255,0), 2, 8,0);
				  else
				   cvCircle( image, cvPointFrom32f(points[1][i]), 4, CV_RGB(0,210,0), 2, 8,0);
            }
            count = k;
			if (k==MAX_COUNT)
			{
				if (init_flag>1)
				{
					// guard against division by zero when one point class is empty
					if (mcount>0) { x_move=mx/mcount; y_move=my/mcount; }
					if (ccount>0) { x_click=cx/ccount; y_click=cy/ccount; }
				}
				if (st->trackface) calc_distances(0); else calc_distances(2);
				
				
				if ((autorestore)) // && (init_flag>5))
				{
				  if (st->trackface)
				  {
					if ((dist_error>=dist_threshold) || (angle_error>=angle_threshold))
						time_to_restore++;
					else time_to_restore=0;

					if (time_to_restore>threshold_time)
					{ need_to_init=1; time_to_restore=0; }
				  }
				  else
				  {
					if ((dist_error>=dist_threshold))
						time_to_restore++;
					else time_to_restore=0;

					if (time_to_restore>threshold_time)
					{ need_to_init=1; time_to_restore=0; }

				  }
				  
				}
				
					
			} 
        }

        if( add_remove_pt && count < MAX_COUNT )
        {
            points[1][count++] = cvPointTo32f(pt);
            cvFindCornerSubPix( grey, points[1] + count - 1, 1,
                cvSize(win_size,win_size), cvSize(-1,-1),
                cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
            add_remove_pt = 0;
        }

	  }

	  CV_SWAP( prev_grey, grey, swap_temp );
	  CV_SWAP( prev_pyramid, pyramid, swap_temp );
	  CV_SWAP( points[0], points[1], swap_points );
		
	  if (init_flag<1000) init_flag++;

	  if (st->showlive) cvShowImage( "Camera", image );
	

	return(0);
}
Example #20
void vpKltOpencv::track(const IplImage *I)
{
  if (!initialized) {
    vpERROR_TRACE("KLT Not initialized");
    throw(vpException(vpTrackingException::initializationError,
                      "KLT Not initialized"));
  }

  if (!I) {
    throw(vpException(vpTrackingException::initializationError,
                      "Image Not initialized"));
  }

  if (I->depth != IPL_DEPTH_8U || I->nChannels != 1) {
    throw(vpException(vpTrackingException::initializationError,
                      "Bad Image format"));
  }

  

  CV_SWAP(prev_image, image, swap_temp);
  CV_SWAP(prev_pyramid, pyramid, swap_temp);
  
  cvCopy(I, image, 0);
  
  if(!initial_guess){
    // Save current features as previous features
    countPrevFeatures = countFeatures;
    for (int boucle=0; boucle<countFeatures;boucle++)  {
      prev_featuresid[boucle] = featuresid[boucle];
    }
    
    CvPoint2D32f *swap_features = 0;
    CV_SWAP(prev_features, features, swap_features);
  }
  
  if (countFeatures <= 0) return;

  cvCalcOpticalFlowPyrLK( prev_image, image, prev_pyramid, pyramid,
                          prev_features, features, countFeatures,
                          cvSize(win_size, win_size), pyramid_level,
                          status, 0, cvTermCriteria(CV_TERMCRIT_ITER
                                                    |CV_TERMCRIT_EPS,20,0.03),
                          flags );
  
  if(!initial_guess)
    flags |= CV_LKFLOW_PYR_A_READY;
  else{
    flags = CV_LKFLOW_PYR_A_READY;
    initial_guess = false;
  }

  int i,k;
  for (i = k = 0; i < countFeatures ; i++)  {
    if (!status[i]) 	{
      lostDuringTrack[i] = 1;
      if (OnFeatureLost)
        OnFeatureLost(_tid, i, featuresid[i], features[i].x,
                      features[i].y);
      continue;
    }
    
    if (IsFeatureValid)	{
      if (!IsFeatureValid(_tid, features[i].x, features[i].y))   {
        lostDuringTrack[i] = 1;
        if (OnFeatureLost)
          OnFeatureLost(_tid, i, featuresid[i], features[i].x, features[i].y);
        continue;
      }
    }
    features[k] = features[i];
    featuresid[k] = featuresid[i];

    if (OnMeasureFeature) OnMeasureFeature(_tid, k, featuresid[k], features[k].x, features[k].y);
    
    lostDuringTrack[i] = 0;
    k++;
  }
  countFeatures = k;
}
Example #21
static void
kms_crowd_detector_compute_optical_flow (KmsCrowdDetector * crowddetector,
    IplImage * binary_actual_motion, CvRect container, int curve)
{
  IplImage *eig_image;
  IplImage *temp_image;
  IplImage *frame1_1C;
  IplImage *frame2_1C;
  IplImage *pyramid1;
  IplImage *pyramid2;
  CvSize frame_size;
  CvPoint2D32f frame2_features[NUMBER_FEATURES_OPTICAL_FLOW];
  char optical_flow_found_feature[NUMBER_FEATURES_OPTICAL_FLOW];
  float optical_flow_feature_error[NUMBER_FEATURES_OPTICAL_FLOW];
  CvPoint2D32f frame1_features[NUMBER_FEATURES_OPTICAL_FLOW];
  int number_of_features = NUMBER_FEATURES_OPTICAL_FLOW;
  CvSize optical_flow_window =
      cvSize (WINDOW_SIZE_OPTICAL_FLOW, WINDOW_SIZE_OPTICAL_FLOW);

  frame_size.width = crowddetector->priv->actual_image->width;
  frame_size.height = crowddetector->priv->actual_image->height;

  /* eigenvalue scratch image for cvGoodFeaturesToTrack should be 32-bit float */
  eig_image = cvCreateImage (frame_size, IPL_DEPTH_32F, 1);
  frame1_1C = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);
  frame2_1C = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);

  cvConvertImage (crowddetector->priv->actual_image, frame1_1C, 0);
  cvConvertImage (crowddetector->priv->previous_image, frame2_1C, 0);
  temp_image = cvCreateImage (frame_size, IPL_DEPTH_32F, 1);

  cvGoodFeaturesToTrack (frame1_1C, eig_image, temp_image, frame1_features,
      &number_of_features, QUALITY_LEVEL, MIN_DISTANCE, NULL,
      BLOCK_SIZE, USE_HARRIS_DETECTOR, HARRIS_DETECTOR_K);

  CvTermCriteria optical_flow_termination_criteria =
      cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
      MAX_ITER_OPTICAL_FLOW, EPSILON_OPTICAL_FLOW);

  pyramid1 = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);
  pyramid2 = cvCreateImage (frame_size, IPL_DEPTH_8U, 1);

  cvCalcOpticalFlowPyrLK (frame2_1C, frame1_1C, pyramid1, pyramid2,
      frame1_features, frame2_features, number_of_features,
      optical_flow_window, 3, optical_flow_found_feature,
      optical_flow_feature_error, optical_flow_termination_criteria, 0);

  cvCopy (crowddetector->priv->actual_image,
      crowddetector->priv->previous_image, 0);

  kms_crowd_detector_compute_roi_direction_vector (crowddetector,
      number_of_features, optical_flow_found_feature, frame1_features,
      frame2_features, CV_RGB (255, 0, 0), binary_actual_motion, container,
      curve);

  cvReleaseImage (&eig_image);
  cvReleaseImage (&temp_image);
  cvReleaseImage (&frame1_1C);
  cvReleaseImage (&frame2_1C);
  cvReleaseImage (&pyramid1);
  cvReleaseImage (&pyramid2);
}
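The scratch images in this detector deserve a comment: the eigenvalue and temp images handed to cvGoodFeaturesToTrack are floating-point maps, while the two pyramid arguments of cvCalcOpticalFlowPyrLK are apparently only raw workspace. A minimal allocation sketch under that assumption (the helper name is illustrative):

#include <opencv/cv.h>

static void alloc_flow_scratch(CvSize sz, IplImage **eig, IplImage **tmp,
                               IplImage **pyr1, IplImage **pyr2)
{
    *eig  = cvCreateImage(sz, IPL_DEPTH_32F, 1);  /* per-pixel min eigenvalue */
    *tmp  = cvCreateImage(sz, IPL_DEPTH_32F, 1);  /* internal workspace */
    *pyr1 = cvCreateImage(sz, IPL_DEPTH_8U, 1);   /* pyramid buffer, frame A */
    *pyr2 = cvCreateImage(sz, IPL_DEPTH_8U, 1);   /* pyramid buffer, frame B */
}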
Example #22
0
void Points2::retreiveFrame(cv::Mat & frame) {

	bobs.clear();
	double horizProp = (double) 640 / frame.cols;
	double vertProp = (double) 480 / frame.rows;

    CvSize frameSize;
    frameSize.width = frame.size().width;
    frameSize.height = frame.size().height;

    checkSize(frameSize);

    IplImage *dupa = new IplImage(frame);

    cvCopy(dupa,tempImage,0);

    cvCvtColor(tempImage, currGreyImage, CV_BGR2GRAY);

    int i, k;

    if( count > 0 )
    {
        cvCalcOpticalFlowPyrLK( prevGreyImage, currGreyImage, prevPyramid, currPyramid,
            points[0], points[1], count, cvSize(20,20), 3, status, 0,
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), flags );

        flags |= CV_LKFLOW_PYR_A_READY;
        for( i = k = 0; i < count; i++ )
        {
            if( add_remove_pt )
            {
                double dx = pt.x - points[1][i].x;
                double dy = pt.y - points[1][i].y;

                if( dx*dx + dy*dy <= 25 )
                {
                    add_remove_pt = 0;
                    continue;
                }
            }

            if( !status[i] )
                continue;

            points[1][k++] = points[1][i];
            cv::circle( frame, cvPointFrom32f(points[1][i]), 3, CV_RGB(0,255,0), -1, 8,0);
			bobs.append(BOb((quint16) (horizProp * points[1][i].x),
							(quint16) (vertProp * points[1][i].y),
							1,1));
        }
        count = k;
    }

    if( add_remove_pt && count < max_count )
    {

        points[1][count++] = cvPointTo32f(pt);

        cvFindCornerSubPix( currGreyImage, points[1] + count - 1, 1,
            cvSize(10,10), cvSize(-1,-1),
            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
        add_remove_pt = 0;

    }

    CV_SWAP( prevGreyImage, currGreyImage, swapImage );
    CV_SWAP( prevPyramid, currPyramid, swapImage );
    CV_SWAP( points[0], points[1], swap_points );
    delete dupa;

	if(!bobs.empty())
		emit bobjects(&bobs);
}
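The add_remove_pt branch above is the classic lkdemo idiom: a clicked point is appended to the tracked set, then snapped onto the nearest corner by running sub-pixel refinement on that single point. A self-contained restatement (names are assumptions, not from this class):

#include <opencv/cv.h>

static void add_clicked_point(IplImage *grey, CvPoint2D32f *pts, int *count,
                              CvPoint click)
{
    pts[(*count)++] = cvPointTo32f(click);            /* append the raw click */
    cvFindCornerSubPix(grey, pts + *count - 1, 1,     /* refine just that point */
                       cvSize(10, 10), cvSize(-1, -1),
                       cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
}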
Example #23
0
void calc_homography(const IplImage *src, IplImage *dst[], CvMat *hom[], int image_num)
{
	CvSize size = cvSize(src->width, src->height);
	IplImage *img_prev = cvCreateImage(size, src->depth, 1); // single-channel image
	IplImage *img_curr = cvCreateImage(size, src->depth, 1);
	cvCvtColor(src, img_prev, CV_BGR2GRAY);
	
	CvPoint2D32f features[MAX_CORNERS];
	CvPoint2D32f features_curr[MAX_CORNERS];
	int corner_count = MAX_CORNERS;
	
	int t1 = clock();
	cvGoodFeaturesToTrack(img_prev, NULL, NULL, features, &corner_count, 0.02, 0.5, NULL, 3, 0, 0.04);
	// the features array filled by good features to track is effectively the output
	// quality_level: the minimum eigenvalue quality for a point to count as a corner
	// min_distance 0.5: corners must be at least that many pixels apart
	st1 += clock()-t1;
	
	t1 = clock();
	cvFindCornerSubPix(img_prev, features, corner_count, cvSize(WIN_SIZE,WIN_SIZE), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03));
	// refine the corners to sub-pixel accuracy
	// the search window is (2*WIN_SIZE+1) x (2*WIN_SIZE+1) pixels
	st2 += clock()-t1;
	
	char feature_found[MAX_CORNERS];
	float feature_error[MAX_CORNERS];
	CvPoint2D32f good_src[MAX_CORNERS];
	CvPoint2D32f good_dst[MAX_CORNERS];
	CvSize pyr_size = cvSize(img_prev->width + 8, img_prev->height/3); // scratch size used by the legacy LK examples (see the note after this function)
	IplImage *pyr_prev = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1); // two pyramid image buffers
	IplImage *pyr_curr = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);
	
	for (int k = 0; k < image_num; ++k)
	{
		cvCvtColor(dst[k], img_curr, CV_BGR2GRAY);
		t1 = clock();
		cvCalcOpticalFlowPyrLK(img_prev, img_curr, pyr_prev, pyr_curr, features, features_curr, corner_count, cvSize(WIN_SIZE,WIN_SIZE), 5, feature_found, feature_error, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03), 0);
		// compute pyramidal LK optical flow with 5 pyramid levels
		// the tracked positions in img_curr are written to features_curr
		// feature_found holds corner_count entries, one per input corner
		st3 += clock()-t1;
	
		int good_num = 0;
		for (int i = 0; i < corner_count; ++i)
		{
			if (feature_found[i] != 0 && feature_error[i] < 550)
			// keep only the well-tracked features
			{
				good_src[good_num] = features[i];
				good_dst[good_num] = features_curr[i];
				++good_num;
			}
		}
	
		if (good_num >= 4)
		{
			CvMat pt_src = cvMat(1, good_num, CV_32FC2, good_src);
			CvMat pt_dst = cvMat(1, good_num, CV_32FC2, good_dst);
			
			t1 = clock();
			cvFindHomography(&pt_src, &pt_dst, hom[k], CV_RANSAC, 5, NULL);
			st4 += clock()-t1;
		}
		else fprintf(stderr, "Unable to calc homography : %d\n", k);
	}
	cvReleaseImage(&pyr_prev);
	cvReleaseImage(&pyr_curr);
	cvReleaseImage(&img_prev);
	cvReleaseImage(&img_curr);
}
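About the pyr_size line marked with a question mark in this example: a plausible reading (an assumption, not documented API) is that the scratch buffer must hold every pyramid level below the full image, and the level areas form a geometric series that sums to width*height/3, with 8 spare columns for per-row alignment padding:

#include <opencv/cv.h>

/* sum over levels k>=1 of (w/2^k)*(h/2^k) = w*h*(1/4 + 1/16 + ...) = w*h/3,
 * so height/3 rows of the original width suffice for all coarser levels. */
static CvSize lk_pyramid_size(const IplImage *img)
{
    return cvSize(img->width + 8, img->height / 3);
}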
Example #24
0
int main(int argc, char * argv[])
{
	if(argc < 3) {
		fprintf(stderr, "%s image1 image2\n", argv[0]);
		return 1;
	}

	char * im1fname = argv[1];
	char * im2fname = argv[2];

	IplImage * image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_GRAYSCALE);

	IplImage * eigenvalues = cvCreateImage(cvGetSize(image1), IPL_DEPTH_32F, 1);
	IplImage * temp = cvCreateImage(cvGetSize(image1), IPL_DEPTH_32F, 1);

	int count = MAX_COUNT;
	double quality = 0.5;
	// double min_distance = 2;
	double min_distance = 50;
	int block_size = 7;
	int use_harris = 0;
	int win_size = 10;
	int flags = 0;

	CvPoint2D32f * source_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * dest_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));
	CvPoint2D32f * delaunay_points = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(CvPoint2D32f));

	cvGoodFeaturesToTrack( image1, eigenvalues, temp, source_points, &count,
			quality, min_distance, 0, block_size, use_harris, 0.04 );

	printf("%d features\n",count);

	setbuf(stdout, NULL);

	printf("Finding corner subpix...");
	cvFindCornerSubPix( image1, source_points, count,
			cvSize(win_size,win_size), cvSize(-1,-1),
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03));
	printf("done.\n");

	cvReleaseImage(&eigenvalues);
	cvReleaseImage(&temp);

	IplImage * image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_GRAYSCALE);

	char * status = (char*)cvAlloc(sizeof(char)*MAX_COUNT);

	IplImage * pyramid = cvCreateImage( cvGetSize(image1), IPL_DEPTH_8U, 1 );
	IplImage * second_pyramid = cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 1 );

	printf("Computing optical flow...");	
	cvCalcOpticalFlowPyrLK(image1, image2, pyramid, second_pyramid, source_points,
		dest_points, count, cvSize(win_size,win_size), 4, status, 0,
		cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03),
		flags);
	printf("done.\n");

	int num_matches = 0;
	int num_out_matches = 0;
	int max_dist = 30;
	int offset = 200;
	
	CvMemStorage * storage = cvCreateMemStorage(0);
	CvSubdiv2D * delaunay = cvCreateSubdivDelaunay2D( cvRect(0,0,image1->width,image1->height), storage);

	cvReleaseImage(&image1);
	cvReleaseImage(&image2);
	
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);

	cvSet( image1, cvScalarAll(255) );

	std::map<CvPoint, CvPoint> point_lookup_map;
	std::vector<std::pair<CvPoint, CvPoint> > point_lookup;

	// put corners in the point lookup as going to themselves
	point_lookup_map[cvPoint(0,0)] = cvPoint(0,0);
	point_lookup_map[cvPoint(0,image1->height-1)] = cvPoint(0,image1->height-1);
	point_lookup_map[cvPoint(image1->width-1,0)] = cvPoint(image1->width-1,0);
	point_lookup_map[cvPoint(image1->width-1,image1->height-1)] = cvPoint(image1->width-1,image1->height-1);

	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,0), cvPoint(0,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(0,image1->height-1), cvPoint(0,image1->height-1)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,0), cvPoint(image1->width-1,0)));
	point_lookup.push_back(std::pair<CvPoint,CvPoint>(cvPoint(image1->width-1,image1->height-1), cvPoint(image1->width-1,image1->height-1)));

	printf("Inserting corners...");
	// put corners in the Delaunay subdivision
	for(unsigned int i = 0; i < point_lookup.size(); i++) {
		cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(point_lookup[i].first) );
	}
	printf("done.\n");

	for(int i = 0; i < count; i++) {
		if(status[i]) {
			CvPoint source = cvPointFrom32f(source_points[i]);
			CvPoint dest = cvPointFrom32f(dest_points[i]);
	
			if((((int)fabs((double)(source.x - dest.x))) > max_dist) ||
				 (((int)fabs((double)(source.y - dest.y))) > max_dist)) {	
				num_out_matches++;
			}
			else if((dest.x >= 0) && (dest.y >= 0) && (dest.x < (image1->width)) && (dest.y < (image1->height))) {
				if(point_lookup_map.find(source) == point_lookup_map.end()) {
					num_matches++;
				
					point_lookup_map[source] = dest;
					point_lookup.push_back(std::pair<CvPoint,CvPoint>(source,dest));
					delaunay_points[i] = (cvSubdivDelaunay2DInsert( delaunay, cvPointTo32f(source) ))->pt;
					cvSetImageROI( image1, cvRect(source.x-8,source.y-8,8*2,8*2) );
					cvResetImageROI( image2 );
					cvGetRectSubPix( image2, image1, dest_points[i] );
				}
				/*
				cvSet2D( image1, source.y, source.x, cvGet2D( image2, dest.y, dest.x ) );
				cvSet2D( image1, source.y, source.x+1, cvGet2D( image2, dest.y, dest.x+1 ) );
				cvSet2D( image1, source.y, source.x-1, cvGet2D( image2, dest.y, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x, cvGet2D( image2, dest.y+1, dest.x ) );
				cvSet2D( image1, source.y-1, source.x, cvGet2D( image2, dest.y-1, dest.x ) );
				cvSet2D( image1, source.y+1, source.x+1, cvGet2D( image2, dest.y+1, dest.x+1 ) );
				cvSet2D( image1, source.y-1, source.x-1, cvGet2D( image2, dest.y-1, dest.x-1 ) );
				cvSet2D( image1, source.y+1, source.x-1, cvGet2D( image2, dest.y+1, dest.x-1 ) );
				cvSet2D( image1, source.y-1, source.x+1, cvGet2D( image2, dest.y-1, dest.x+1 ) );
				*/

				// cvCircle( image1, source, 4, CV_RGB(255,0,0), 2, CV_AA );
				// cvCircle( image2, dest, 4, CV_RGB(255,0,0), 2, CV_AA );
			}

			/*
			cvSetImageROI( image1, cvRect(source.x-offset,source.y-offset,offset*2,offset*2) );
			cvSetImageROI( image2, cvRect(dest.x-offset,dest.y-offset,offset*2,offset*2) );
			cvNamedWindow("image1",0);
			cvNamedWindow("image2",0);
			cvShowImage("image1",image1);
			cvShowImage("image2",image2);
			printf("%d,%d -> %d,%d\n",source.x,source.y,dest.x,dest.y);
			cvWaitKey(0);
			cvDestroyAllWindows();
			*/
		}
	}
	printf("%d %d\n",num_matches,num_out_matches);
	printf("%d lookups\n",point_lookup_map.size());

	cvResetImageROI( image1 );

	cvSaveImage("sparse.jpg", image1);

	cvReleaseImage(&image1);
	image1 = cvLoadImage(im1fname, CV_LOAD_IMAGE_COLOR);
	cvSet( image1, cvScalarAll(255) );
	printf("Warping image...");

	CvSeqReader  reader;
	int total = delaunay->edges->total;
	int elem_size = delaunay->edges->elem_size;

	cvStartReadSeq( (CvSeq*)(delaunay->edges), &reader, 0 );

	std::vector<Triangle> trivec;
	std::vector<CvMat *> baryinvvec;

	for( int i = 0; i < total; i++ ) {
		CvQuadEdge2D* edge = (CvQuadEdge2D*)(reader.ptr);

		if( CV_IS_SET_ELEM( edge ))	{
			CvSubdiv2DEdge curedge = (CvSubdiv2DEdge)edge;
			CvSubdiv2DEdge t = curedge;
			Triangle temptri;
			int count = 0;
			
			// construct a triangle from this edge
			do {
				CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
				if(count < 3) {
					pt->pt.x = pt->pt.x >= image1->width ? image1->width-1 : pt->pt.x;
					pt->pt.y = pt->pt.y >= image1->height ? image1->height-1 : pt->pt.y;
					pt->pt.x = pt->pt.x < 0 ? 0 : pt->pt.x;
					pt->pt.y = pt->pt.y < 0 ? 0 : pt->pt.y;

					temptri.points[count] = cvPointFrom32f( pt->pt );
				}
				else {
					printf("More than 3 edges\n");
				}
				count++;
				t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
			} while( t != curedge );
			
			// check that triangle is not already in
			if( std::find(trivec.begin(), trivec.end(), temptri) == trivec.end() ) {
				// push triangle in and draw
				trivec.push_back(temptri);
				cvLine( image1, temptri.points[0], temptri.points[1], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[1], temptri.points[2], CV_RGB(255,0,0), 1, CV_AA, 0 );
				cvLine( image1, temptri.points[2], temptri.points[0], CV_RGB(255,0,0), 1, CV_AA, 0 );

				// compute barycentric computation vector for this triangle
				CvMat * barycen = cvCreateMat( 3, 3, CV_32FC1 );
				CvMat * baryceninv = cvCreateMat( 3, 3, CV_32FC1 );

				barycen->data.fl[3*0+0] = temptri.points[0].x;
				barycen->data.fl[3*0+1] = temptri.points[1].x;
				barycen->data.fl[3*0+2] = temptri.points[2].x;
				barycen->data.fl[3*1+0] = temptri.points[0].y;
				barycen->data.fl[3*1+1] = temptri.points[1].y;
				barycen->data.fl[3*1+2] = temptri.points[2].y;
				barycen->data.fl[3*2+0] = 1;
				barycen->data.fl[3*2+1] = 1;
				barycen->data.fl[3*2+2] = 1;

				cvInvert( barycen, baryceninv, CV_LU );
				baryinvvec.push_back(baryceninv);

				cvReleaseMat( &barycen );
			}
		}

		CV_NEXT_SEQ_ELEM( elem_size, reader );
	}
	printf("%d triangles...", trivec.size());
	cvSaveImage("triangles.jpg", image1);
	
	cvSet( image1, cvScalarAll(255) );
	IplImage * clean_nonthresh = cvLoadImage( "conhull-clean.jpg", CV_LOAD_IMAGE_COLOR );

	// for each triangle
	for(unsigned int i = 0; i < trivec.size(); i++) {
		Triangle curtri = trivec[i];
		CvMat * curpoints = cvCreateMat( 1, 3, CV_32SC2 );
		Triangle target;
		std::map<CvPoint,CvPoint>::iterator piter[3];
		
		printf("Triangle %d / %d\n",i,trivec.size());
		bool is_corner = false;
		for(int j = 0; j < 3; j++) {
			/*
			curpoints->data.i[2*j+0] = curtri.points[j].x;
			curpoints->data.i[2*j+1] = curtri.points[j].y;
			*/
			CV_MAT_ELEM( *curpoints, CvPoint, 0, j ) = curtri.points[j];
			printf("%d,%d\n",curtri.points[j].x,curtri.points[j].y);
	
			/*	
			if((curtri.points[j] == cvPoint(0,0)) ||  (curtri.points[j] == cvPoint(0,image1->height)) ||(curtri.points[j] == cvPoint(image1->width,0)) ||(curtri.points[j] == cvPoint(image1->width,image1->height))) {
				is_corner = true;
				break;
			}
			*/

			for(unsigned int k = 0; k < point_lookup.size(); k++) {
				std::pair<CvPoint,CvPoint> thispair = point_lookup[k];
				if(thispair.first == curtri.points[j]) {
					target.points[j] = thispair.second;
					break;
				}
			}

			/*
			piter[j] = point_lookup_map.find(curtri.points[j]);
			if(piter[j] != point_lookup_map.end() ) {
				target.points[j] = piter[j]->second;
			}
			*/
		}
			
		// if((piter[0] != point_lookup_map.end()) && (piter[1] != point_lookup_map.end()) && (piter[2] != point_lookup_map.end())) {
		if(!is_corner) {
			CvMat * newcorners = cvCreateMat( 3, 3, CV_32FC1 );
			newcorners->data.fl[3*0+0] = target.points[0].x;
			newcorners->data.fl[3*0+1] = target.points[1].x;
			newcorners->data.fl[3*0+2] = target.points[2].x;
			newcorners->data.fl[3*1+0] = target.points[0].y;
			newcorners->data.fl[3*1+1] = target.points[1].y;
			newcorners->data.fl[3*1+2] = target.points[2].y;
			newcorners->data.fl[3*2+0] = 1;
			newcorners->data.fl[3*2+1] = 1;
			newcorners->data.fl[3*2+2] = 1;

			CvContour hdr;
			CvSeqBlock blk;
			CvRect trianglebound = cvBoundingRect( cvPointSeqFromMat(CV_SEQ_KIND_CURVE+CV_SEQ_FLAG_CLOSED, curpoints, &hdr, &blk), 1 );
			printf("Bounding box: %d,%d,%d,%d\n",trianglebound.x,trianglebound.y,trianglebound.width,trianglebound.height);
			for(int y = trianglebound.y; (y < (trianglebound.y + trianglebound.height)) && ( y < image1->height); y++) {
				for(int x = trianglebound.x; (x < (trianglebound.x + trianglebound.width)) && (x < image1->width); x++) {
					// check to see if we're inside this triangle
					/*
					CvPoint v0 = cvPoint( curtri.points[2].x - curtri.points[0].x, curtri.points[2].y - curtri.points[0].y );
					CvPoint v1 = cvPoint( curtri.points[1].x - curtri.points[0].x, curtri.points[1].y - curtri.points[0].y );
					CvPoint v2 = cvPoint( x - curtri.points[0].x, y - curtri.points[0].y );
					
					int dot00 = v0.x * v0.x + v0.y * v0. y;
					int dot01 = v0.x * v1.x + v0.y * v1. y;
					int dot02 = v0.x * v2.x + v0.y * v2. y;
					int dot11 = v1.x * v1.x + v1.y * v1. y;
					int dot12 = v1.x * v2.x + v1.y * v2. y;

					double invDenom = 1.0 / (double)(dot00 * dot11 - dot01 * dot01);
					double u = (double)(dot11 * dot02 - dot01 * dot12) * invDenom;
					double v = (double)(dot00 * dot12 - dot01 * dot02) * invDenom;
					*/

					CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
					CvMat * result = cvCreateMat(3, 1, CV_32FC1);
					curp->data.fl[0] = x;
					curp->data.fl[1] = y;
					curp->data.fl[2] = 1;
					cvMatMul( baryinvvec[i], curp, result );
					// double u = result->data.fl[0]/result->data.fl[2];
					// double v = result->data.fl[1]/result->data.fl[2];
			

					if( (result->data.fl[0] > 0) && (result->data.fl[1] > 0) && (fabs(1.0 - (result->data.fl[0]+result->data.fl[1]+result->data.fl[2])) <= 0.01) ) {
					// if((u > 0) || (v > 0) /*&& ((u +v) < 1)*/ ) {
						// printf("Barycentric: %f %f %f\n", result->data.fl[0], result->data.fl[1], result->data.fl[2]);
						// this point is inside this triangle
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//	trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

						CvMat * sourcepoint = cvCreateMat(3, 1, CV_32FC1);
						cvMatMul( newcorners, result, sourcepoint );
						double sourcex = sourcepoint->data.fl[0]/*/sourcepoint->data.fl[2]*/;
						double sourcey = sourcepoint->data.fl[1]/*/sourcepoint->data.fl[2]*/;
						if((sourcex >= 0) && (sourcey >= 0) && (sourcex < (image1->width)) && (sourcey < (image1->height))) {
							// printf("%d,%d %d,%d\n",x,y,(int)sourcex,(int)sourcey);
							cvSet2D( image1, y, x, cvGet2D( clean_nonthresh, (int)sourcey, (int)sourcex ) );
						}
	
						/*
						if((i == 143) && (y == 3577) && (x > 2055) && (x < 2087)) {
							printf("%d: %f, %f, %f\t%f, %f, %f\n",x,result->data.fl[0],result->data.fl[1],result->data.fl[2],
									sourcepoint->data.fl[0],sourcepoint->data.fl[1],sourcepoint->data.fl[2]);
						}
						*/
	
						cvReleaseMat( &sourcepoint );
						
						// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
						//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					}
					cvReleaseMat( &result );
					cvReleaseMat( &curp );
				}
			}
			cvReleaseMat( &newcorners );
		}
		cvReleaseMat( &curpoints );
	}
	/*
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvMat * curp = cvCreateMat(3, 1, CV_32FC1);
			CvMat * result = cvCreateMat(3, 1, CV_32FC1);
			curp->data.fl[0] = x;
			curp->data.fl[1] = y;
			curp->data.fl[2] = 1;
			for(unsigned int i = 0; i < baryinvvec.size(); i++) {
				cvMatMul( baryinvvec[i], curp, result );
				double u = result->data.fl[0]/result->data.fl[2];
				double v = result->data.fl[1]/result->data.fl[2];
				if((u > 0) && (v > 0) && (u + v < 1)) {
					// printf("Point %d,%d inside %d,%d %d,%d %d,%d\n",x,y,trivec[i].points[0].x,trivec[i].points[0].y,
					//		trivec[i].points[1].x,trivec[i].points[1].y,trivec[i].points[2].x,trivec[i].points[2].y);

					break;
				}
			}
			cvReleaseMat( &result );
			cvReleaseMat( &curp );
		}
	}
	*/

	cvReleaseImage( &clean_nonthresh );

#ifdef OLD_BUSTED
	for(int y = 0; y < image1->height; y++) {
		for(int x = 0; x < image1->width; x++) {
			CvSubdiv2DPointLocation locate_result;
			CvSubdiv2DEdge on_edge;
			CvSubdiv2DPoint * on_vertex;
			CvPoint curpoint = cvPoint( x, y );
			locate_result = cvSubdiv2DLocate( delaunay, cvPointTo32f( curpoint ),
				&on_edge, &on_vertex );
			if( (locate_result != CV_PTLOC_OUTSIDE_RECT) && (locate_result != CV_PTLOC_ERROR) ) {
				if( locate_result == CV_PTLOC_VERTEX ) { // this point is on a vertex
					for(int i = 0; i < count; i++) {
						if(((on_vertex->pt).x == delaunay_points[i].x) && ((on_vertex->pt).y == delaunay_points[i].y)) {
							cvSet2D( image1, y, x, cvGet2D( image2, cvPointFrom32f(dest_points[i]).y, cvPointFrom32f(dest_points[i]).x ) );
							break;
						}
					}
				}
				else if( locate_result == CV_PTLOC_ON_EDGE ) { // this point is on an edge
					CvSubdiv2DPoint* org_pt;
					CvSubdiv2DPoint* dst_pt;
					CvPoint org_pt_warp;
					CvPoint dst_pt_warp;
						
					org_pt = cvSubdiv2DEdgeOrg(on_edge);
					dst_pt = cvSubdiv2DEdgeDst(on_edge);

					for(int i = 0; i < count; i++) {
						if(((org_pt->pt).x == delaunay_points[i].x) && ((org_pt->pt).y == delaunay_points[i].y)) {
							org_pt_warp = cvPointFrom32f(dest_points[i]);
						}
						if(((dst_pt->pt).x == delaunay_points[i].x) && ((dst_pt->pt).y == delaunay_points[i].y)) {
							dst_pt_warp = cvPointFrom32f(dest_points[i]);
						}
					}

					// compute vector length of original edge and current point
					double original_length;
					double cur_length; 
					if( (int)((org_pt->pt).x) == curpoint.x ) { // vertical line
						original_length = fabs((org_pt->pt).y - (dst_pt->pt).y);
						cur_length = fabs((org_pt->pt).y - curpoint.y);
					}
					else if( (int)((org_pt->pt).y) == curpoint.y ) { // horizontal line
						original_length = fabs((org_pt->pt).x - (dst_pt->pt).x);
						cur_length = fabs((org_pt->pt).x - curpoint.x);
					}
					else { // sloped line
				 		original_length = sqrt(pow((org_pt->pt).x - (dst_pt->pt).x, 2.0) + pow((org_pt->pt).y - (dst_pt->pt).y, 2.0));
						cur_length = sqrt(pow((org_pt->pt).x - curpoint.x, 2.0) + pow((org_pt->pt).y - curpoint.y, 2.0));
					}
					// compute ratio of this point on the edge
					double ratio = cur_length / original_length;
					// copy this point from the destination edge
					CvPoint point_in_original;
					int warped_x = (int)(org_pt_warp.x - dst_pt_warp.x);
					int warped_y = (int)(org_pt_warp.y - dst_pt_warp.y);
					if( org_pt_warp.x == curpoint.x ) { // vertical line
						point_in_original.y = (int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y)));
						point_in_original.x = org_pt_warp.x;
					}
					else if(org_pt_warp.y == curpoint.y) { // horizontal line
						point_in_original.x = (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x)));
						point_in_original.y = org_pt_warp.y;
					}
					else { // sloped line
						double destination_length = sqrt(pow((org_pt_warp).x - (dst_pt_warp).x, 2.0) + pow((org_pt_warp).y - (dst_pt_warp).y, 2.0));
						double scaled_length = ratio * destination_length;
						double dest_angle = atan(fabs( (double)warped_y / (double)warped_x ));
						double xdist = scaled_length * cos(dest_angle);
						double ydist = scaled_length * sin(dest_angle);
						xdist = warped_x > 0 ? xdist : xdist * -1;
						ydist = warped_y > 0 ? ydist : ydist * -1;
						point_in_original.x = (int)( org_pt_warp.x + xdist);
						point_in_original.y = (int)( org_pt_warp.y + ydist);
					}
					
					if((point_in_original.x >= 0) && (point_in_original.y >= 0) && (point_in_original.x < (image1->width)) && (point_in_original.y < (image1->height))) {
						cvSet2D( image1, y, x, cvGet2D( image2, point_in_original.y, point_in_original.x ) );
					}
					else {
						printf("Edge point outside image\n");
					}
					// cvSet2D( image1, y, x, cvGet2D( image2, (int)(org_pt_warp.x + (ratio * (org_pt_warp.x - dst_pt_warp.x))), 
					//			(int)(org_pt_warp.y + (ratio * (org_pt_warp.y - dst_pt_warp.y))) ) );
				}
				else if( locate_result == CV_PTLOC_INSIDE ) { // this point is inside a facet (triangle)
					/*
					printf("Point inside facet: %d, %d\n",curpoint.x,curpoint.y);
					int count = 0;
					CvPoint * origins = (CvPoint*)malloc(sizeof(CvPoint)*3);
					CvSubdiv2DEdge t = on_edge;
					// count number of edges
					do {
						CvSubdiv2DPoint* pt = cvSubdiv2DEdgeOrg( t );
						if(count < 3) {
							origins[count] = cvPoint( cvRound(pt->pt.x), cvRound(pt->pt.y));
							printf("%d,%d\t",origins[count].x,origins[count].y);
						}
						count++;
						t = cvSubdiv2DGetEdge( t, CV_NEXT_AROUND_LEFT );
					} while(t != on_edge);
					printf("\n");

					free(origins);
					*/
				}
			}
		}
	}
#endif // OLD_BUSTED
	printf("done.\n");

	cvSaveImage("fullwarp.jpg", image1);

	printf("Drawing subdivisions on warped image...");
	draw_subdiv( image1, delaunay, NULL, NULL, 0, NULL );
	// draw_subdiv( image1, delaunay, delaunay_points, source_points, count, status );
	printf("done.\n");
	
	cvSaveImage("edgeswarp.jpg", image1);

	cvReleaseImage(&image2);

	image2 = cvLoadImage(im2fname, CV_LOAD_IMAGE_COLOR);
	// cvCreateImage( cvGetSize(image2), IPL_DEPTH_8U, 3 );

	// cvCalcSubdivVoronoi2D( delaunay );
	printf("Drawing subdivisions on unwarped image...");
	draw_subdiv( image2, delaunay, delaunay_points, dest_points, count, status );
	// draw_subdiv( image2, delaunay, NULL, NULL, 0, NULL );
	printf("done.\n");

	cvSaveImage("edges.jpg",image2);

	cvReleaseImage(&image1);
	cvFree(&source_points);
	cvFree(&dest_points);
	cvFree(&status);
	cvReleaseMemStorage(&storage);
	cvFree(&delaunay_points);

	cvReleaseImage(&image2);

	return 0;
}
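The warping loop above relies on barycentric coordinates: each triangle's vertex matrix is inverted once, and a pixel lies inside the triangle when its three coordinates are all positive and sum to 1 (up to rounding). A self-contained restatement of that test (the helper name and tolerance are illustrative):

#include <math.h>
#include <opencv/cv.h>

/* For vertices a, b, c, solve [ax bx cx; ay by cy; 1 1 1] * [u v w]^T = [x y 1]^T. */
static int point_in_triangle(CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c,
                             float x, float y, float bary[3])
{
    float m[9] = { a.x, b.x, c.x,
                   a.y, b.y, c.y,
                   1.f, 1.f, 1.f };
    CvMat M = cvMat(3, 3, CV_32FC1, m);
    float mi[9];
    CvMat Minv = cvMat(3, 3, CV_32FC1, mi);
    cvInvert(&M, &Minv, CV_LU);            /* invert once per triangle */

    float p[3] = { x, y, 1.f };
    CvMat P = cvMat(3, 1, CV_32FC1, p);
    CvMat B = cvMat(3, 1, CV_32FC1, bary);
    cvMatMul(&Minv, &P, &B);               /* bary = Minv * [x y 1]^T */

    return bary[0] > 0 && bary[1] > 0 && bary[2] > 0 &&
           fabs(1.0 - (bary[0] + bary[1] + bary[2])) <= 0.01;
}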
Example #25
0
int main( int argc, char* argv[] ) {

	//IplImage* img = cvCreateImage(imSize,IPL_DEPTH_8U,3);
	IplImage* img = cvLoadImage(imcd0,CV_LOAD_IMAGE_UNCHANGED);
	IplImage* imgA = cvLoadImage(imcd0,CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(imcd1,CV_LOAD_IMAGE_GRAYSCALE);
	imSize = cvSize(img->width,img->height);
	rmax=0.8*((imSize.width>imSize.height)?imSize.height/2:imSize.width/2);
	rmin=0.2*((imSize.width>imSize.height)?imSize.height/2:imSize.width/2);
	lx=0.5*imSize.width;
	ly=0.5*imSize.height;
	int win_siz	= 7;
	int arr_siz	= NUMX*NUMY;
	CvPoint2D32f p0 = cvPoint2D32f(imSize.width/2,imSize.height/2);
	IplImage*	pyr = cvCreateImage(imSize,8,1);
	IplImage*	pyr_old = cvCreateImage(imSize,8,1);
	char* status	=0;
	status = (char*)cvAlloc(arr_siz);


	cvNamedWindow("testWindow");
	cvNamedWindow("ImgA");
	cvShowImage("ImgA", imgA);
	cvNamedWindow("ImgB");
	cvShowImage("ImgB", imgB);

	CvPoint2D32f*	arrg		= new CvPoint2D32f[arr_siz];
	CvPoint2D32f*	arrg_old	= new CvPoint2D32f[arr_siz];

	int counter=0;
	for(int x=0; x<NUMX; x++) {
		for(int y=0; y<NUMY; y++) {
			arrg_old[counter].x = p0.x + (-lx/2) + lx*x/NUMX;
			arrg_old[counter].y = p0.y + (-ly/2) + ly*y/NUMY; // grid step in y uses ly
			counter++;
		}
	}
	cout << "f**k-0" << endl;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(arrg_old[i]),CV_RGB(0,0,0),4);
	}
	cvShowImage("testWindow",img);
	cvWaitKey(100);
	cout << "f**k-1" << endl;

	cvFindCornerSubPix(imgA,
	        			arrg_old,
	        			arr_siz,
	        			cvSize(win_siz,win_siz),
	        			cvSize(2,2),
	        			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
	//cvReleaseImage(&img);
	//img = cvLoadImage(imcd0,CV_LOAD_IMAGE_UNCHANGED);
	cout << "f**k-2" << endl;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(arrg_old[i]),CV_RGB(255,0,255),4);
	}
	cvShowImage("testWindow",img);
	cvWaitKey(100);
	cout << "f**k-3" << endl;

	float errors[arr_siz];
	cvCalcOpticalFlowPyrLK(imgA,imgB,
	        			pyr_old, pyr,
	        			arrg_old,
	        			arrg,
	        			arr_siz,
	        			cvSize(win_siz,win_siz),
	        			5,
	        			status,
	        			errors,
	        			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.3),
	        			0);

	CvPoint2D32f dp, dp2;
	CvPoint2D32f center = cvPoint2D32f(0., 0.);
	bool arr_draw[arr_siz];
	int count = 0;
	for(int i=0; i<arr_siz; i++) {
		cvLine(img,cvPointFrom32f(arrg[i]),cvPointFrom32f(arrg[i]),CV_RGB(0,255,0),4);
		CvScalar color = CV_RGB(255,0,0);
		dp = getDp(arrg[i],arrg_old[i]);
		double len = getLength(dp);
//		if(errors[i]<50) {
		if(getLength(dp)>3) {
			color = CV_RGB(255,0,0);
		} else {
			color = CV_RGB(100,255,100);
		}
		int nc = i+1;
		arr_draw[i] = false;
		if((nc>-1) && (nc<arr_siz) && len>3) {
			dp2=getDp(arrg[nc],arrg_old[nc]);
			if(getLength(dp2)>2) {
				CvPoint2D32f ctmp = getCrossPoint(arrg_old[i],getOrtoVec(dp), arrg_old[nc],getOrtoVec(dp2));
//				cvLine(img,cvPointFrom32f(arrg_old[i]),cvPointFrom32f(ctmp),CV_RGB(0,0,0),1);
//				cvLine(img,cvPointFrom32f(arrg[i]),cvPointFrom32f(ctmp),CV_RGB(0,0,0),1);
				center = getSum(center,ctmp);
				count++;
				arr_draw[i] = true;
			}
		}
		drawArrow(img,arrg_old[i],arrg[i],color,2,15.);
		cout << "status=[" << (int)status[i] << "], error=[" << errors[i] << "]" << endl;
//		cout << "[" << arrg[i].x << "," << arrg[i].y << "]" << endl;

	}
	center=getDiv(center,count);

	cvCircle(img,cvPointFrom32f(center),10,CV_RGB(0,200,0),1);
	double df = 0;
	for(int i=0; i<arr_siz; i++) {
		if(arr_draw[i]) {
			cvLine(img, cvPointFrom32f(center), cvPointFrom32f(arrg_old[i]),CV_RGB(0,0,0),1);
			cvLine(img, cvPointFrom32f(center), cvPointFrom32f(arrg[i]),CV_RGB(0,0,0),1);
			// small-angle estimate: arc length over radius, converted to degrees
			df += 180.0*(getLength(getDel(arrg[i],arrg_old[i])))
			/(CV_PI*getLength(getDel(arrg_old[i],center)));
		}
	}
	CvFont font, fontbg;
	cvInitFont(&font,CV_FONT_HERSHEY_PLAIN, 2, 2, 0.0, 2, CV_AA);
	cvInitFont(&fontbg,CV_FONT_HERSHEY_PLAIN, 2, 2, 0.0, 8, CV_AA);
	char buff[100];
	bzero(buff,sizeof(buff));
	sprintf(buff,"angle=%0.1f degres",(df/count));
	cvPutText(img,buff,cvPoint(10,25),&fontbg,CV_RGB(0,0,0));
	cvPutText(img,buff,cvPoint(10,25),&font,CV_RGB(255,0,0));

/*
	for(int r=0; r<NUMR; r++) {
		for(int f=0; f<NUMF; f++) {
			double pfi = 2*CV_PI*f/NUMF;
			double ro	= rmin + (rmax-rmin)*r/NUMR;
			p1.x = p0.x + ro*cos(pfi);
			p1.y = p0.y + ro*sin(pfi);
			//cvLine(img,cvPointFrom32f(p1),cvPointFrom32f(p1),CV_RGB(0,0,255),2);
			drawArrow(img,p0,p1,CV_RGB(255,0,0));
		}
	}
*/
	cvShowImage("testWindow",img);
	cvWaitKey(0);

	cvDestroyWindow("testWindow");
	cvReleaseImage(&img);
	cout << "Shutdown" << endl;
	return 0;
}
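The df accumulation in this example is a small-angle estimate: each tracked point's displacement is treated as an arc around the estimated rotation center, so the per-point angle is arc length over radius, converted to degrees. A sketch of that formula (the helper name is illustrative, not from the original):

#include <math.h>
#include <opencv/cv.h>

/* theta_deg = (180/pi) * |p_curr - p_prev| / |p_prev - center| */
static double rotation_deg(CvPoint2D32f prev, CvPoint2D32f curr, CvPoint2D32f center)
{
    double arc    = hypot(curr.x - prev.x, curr.y - prev.y);    /* displacement read as an arc */
    double radius = hypot(prev.x - center.x, prev.y - center.y);
    return radius > 0 ? 180.0 * arc / (CV_PI * radius) : 0.0;
}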
Example #26
0
void HarrisBuffer::CalculateVelocityHistories()
{
  if (pointsToTrack.size()>0)
  {

    CvPoint2D32f* prev_features=(CvPoint2D32f*)malloc((int)pointsToTrack.size()*sizeof(CvPoint2D32f));
    CvPoint2D32f* curr_features=(CvPoint2D32f*)malloc((int)pointsToTrack.size()*sizeof(CvPoint2D32f));
    char * foundFeature=(char *)malloc((int)pointsToTrack.size()*sizeof(char));

    CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( 
        CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

    int i=0;
    std::list<DetectedTrackingPoint>::iterator it;

    for(it=pointsToTrack.begin();it!=pointsToTrack.end(); ++it)
    {
      if ((*it).trajectory.size()==0)
        (*it).trajectory.push_back(cvPoint2D32f((*it).x,(*it).y));
      prev_features[i]= (*it).trajectory.back();
      i++;
    }

    int tempFrameNum = pointsToTrack.begin()->t;
    tempFrameNum += (int) (pointsToTrack.begin()->trajectory.size()) - 1;

    original.GetFrame(tempFrameNum, opticalFlowLastFrame);
    original.GetFrame(tempFrameNum+1,opticalFlowNextFrame);

    cvScale(opticalFlowLastFrame, opticalFlowLastFrame8u, 255.0, 0.0);
    cvScale(opticalFlowNextFrame, opticalFlowNextFrame8u, 255.0, 0.0);

    if (pointsToTrack.size()>0)
    {
      cvCalcOpticalFlowPyrLK(opticalFlowLastFrame8u,opticalFlowNextFrame8u,NULL,NULL,prev_features,
          curr_features,(int)pointsToTrack.size(),cvSize(3,3),0,foundFeature,NULL,
          optical_flow_termination_criteria,0);
    }	

    i=0;

    for(it=pointsToTrack.begin();it!=pointsToTrack.end(); ++it)
    {
      if (foundFeature[i])
        (*it).trajectory.push_back(cvPoint2D32f(curr_features[i].x,curr_features[i].y));
      i++;
    }

    i=0;

    for(it=pointsToTrack.begin();it!=pointsToTrack.end(); ++it)
    {
      if (!(foundFeature[i]))
      { 
        (*it).trackingFinished=true;
        pointsFinishedTracking.push_back(*it);
      }
      i++;
    }


    free(prev_features);
    free(curr_features);
    free(foundFeature);

  }// if pointsToTrack.size()>0
}
int do_example_for_optical_flow(void)
{
	/* Create an object that decodes the input video stream. */
	CvCapture *input_video = cvCaptureFromFile(
		//"C:\\Documents and Settings\\David Stavens\\Desktop\\223B-Demo\\optical_flow_input.avi"
		//"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\video-examples\\AnimalsMovingZSL17_07_14.mp4"
		//"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\\\AnimalsMovingZSL17_07_14.mp4"
		"C:\\Users\\Ran_the_User\\Documents\\Technion_Studies\\2016_A_winter\\02_Aerial_Video_PROJECT\\video-examples\\optical_flow_input.avi"

		);
	if (input_video == NULL)
	{
		/* Either the video didn't exist OR it uses a codec OpenCV
		 * doesn't support.
		 */
		fprintf(stderr, "Error: Can't open video.\n");
		return -1;
	}

	/* Read the video's frame size out of the AVI. */
	CvSize frame_size;
	frame_size.height =		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
	frame_size.width  =		(int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

	/* Determine the number of frames in the AVI. */
	long number_of_frames;
	/* Go to the end of the AVI (ie: the fraction is "1") */
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
	/* Now that we're at the end, read the AVI position in frames */
	number_of_frames = (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
	/* Return to the beginning */
	cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

	/* Create a windows called "Optical Flow" for visualizing the output.
	 * Have the window automatically change its size to match the output.
	 */
	cvNamedWindow("Optical Flow", CV_WINDOW_NORMAL); /// ran change:  CV_WINDOW_AUTOSIZE);
	cv::resizeWindow("Optical Flow", frame_size.width/ windowShrinkFactor, frame_size.height/ windowShrinkFactor);

	long current_frame = 0;
	while(true)
	{
		static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL, *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL, *pyramid1 = NULL, *pyramid2 = NULL;
		//static cvResize smaller = 

//		cv::resize(src, src, img.size());
		/* Go to the frame we want.  Important if multiple frames are queried in
		 * the loop which they of course are for optical flow.  Note that the very
		 * first call to this is actually not needed. (Because the correct position
		 * is set outside the for() loop.)
		 */
		cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

		/* Get the next frame of the video.
		 * IMPORTANT!  cvQueryFrame() always returns a pointer to the _same_
		 * memory location.  So successive calls:
		 * frame1 = cvQueryFrame();
		 * frame2 = cvQueryFrame();
		 * frame3 = cvQueryFrame();
		 * will result in (frame1 == frame2 && frame2 == frame3) being true.
		 * The solution is to make a copy of the cvQueryFrame() output.
		 */
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			/* Why did we get a NULL frame?  We shouldn't be at the end. */
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			return -1;
		}
		/* Allocate another image if not already allocated.
		 * Image has ONE channel of color (ie: monochrome) with 8-bit "color" depth.
		 * This is the image format OpenCV algorithms actually operate on (mostly).
		 */
		allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
		/* Convert whatever the AVI image format is into OpenCV's preferred format.
		 * AND flip the image vertically.  Flip is a shameless hack.  OpenCV reads
		 * in AVIs upside-down by default.  (No comment :-))
		 */
		cvConvertImage(frame, frame1_1C, 0);//  CV_CVTIMG_FLIP);

		/* We'll make a full color backup of this frame so that we can draw on it.
		 * (It's not the best idea to draw on the static memory space of cvQueryFrame().)
		 */
		allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
		cvConvertImage(frame, frame1, 0);//  CV_CVTIMG_FLIP);

		/* Get the second frame of video.  Same principles as the first. */
		frame = cvQueryFrame( input_video );
		if (frame == NULL)
		{
			fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
			return -1;
		}
		allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
		cvConvertImage(frame, frame2_1C, 0);//  CV_CVTIMG_FLIP);

		/* Shi and Tomasi Feature Tracking! */

		/* Preparation: Allocate the necessary storage. */
		allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
		allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

		/* Preparation: This array will contain the features found in frame 1. */
		CvPoint2D32f frame1_features[NUM_OF_FEATURES];

		/* Preparation: BEFORE the function call this variable is the array size
		 * (or the maximum number of features to find).  AFTER the function call
		 * this variable is the number of features actually found.
		 */
		int number_of_features;
		
		/* NUM_OF_FEATURES caps how many features we ask for; tune it for an
		 * accuracy/speed tradeoff analysis.
		 */
		number_of_features = NUM_OF_FEATURES;

		/* Actually run the Shi and Tomasi algorithm!!
		 * "frame1_1C" is the input image.
		 * "eig_image" and "temp_image" are just workspace for the algorithm.
		 * The first ".01" specifies the minimum quality of the features (based on the eigenvalues).
		 * The second ".01" specifies the minimum Euclidean distance between features.
		 * "NULL" means use the entire input image.  You could point to a part of the image.
		 * WHEN THE ALGORITHM RETURNS:
		 * "frame1_features" will contain the feature points.
		 * "number_of_features" will be set to a value <= 400 indicating the number of feature points found.
		 */
		cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features, &number_of_features, .01, .01, NULL);

		/* Pyramidal Lucas Kanade Optical Flow! */

		/* This array will contain the locations of the points from frame 1 in frame 2. */
		CvPoint2D32f frame2_features[NUM_OF_FEATURES];

		/* The i-th element of this array will be non-zero if and only if the i-th feature of
		 * frame 1 was found in frame 2.
		 */
		char optical_flow_found_feature[NUM_OF_FEATURES];

		/* The i-th element of this array is the error in the optical flow for the i-th feature
		 * of frame1 as found in frame 2.  If the i-th feature was not found (see the array above)
		 * I think the i-th entry in this array is undefined.
		 */
		float optical_flow_feature_error[NUM_OF_FEATURES];

		/* This is the window size to use to avoid the aperture problem (see slide "Optical Flow: Overview"). */
		CvSize optical_flow_window = cvSize(3,3);
		
		/* This termination criteria tells the algorithm to stop when it has either done 20 iterations or when
		 * epsilon is better than .3.  You can play with these parameters for speed vs. accuracy but these values
		 * work pretty well in many situations.
		 */
		CvTermCriteria optical_flow_termination_criteria
			= cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

		/* This is some workspace for the algorithm.
		 * (The algorithm actually carves the image into pyramids of different resolutions.)
		 */
		allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
		allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

		/* Actually run Pyramidal Lucas Kanade Optical Flow!!
		 * "frame1_1C" is the first frame with the known features.
		 * "frame2_1C" is the second frame where we want to find the first frame's features.
		 * "pyramid1" and "pyramid2" are workspace for the algorithm.
		 * "frame1_features" are the features from the first frame.
		 * "frame2_features" is the (outputted) locations of those features in the second frame.
		 * "number_of_features" is the number of features in the frame1_features array.
		 * "optical_flow_window" is the size of the window to use to avoid the aperture problem.
		 * "5" is the maximum number of pyramids to use.  0 would be just one level.
		 * "optical_flow_found_feature" is as described above (non-zero iff feature found by the flow).
		 * "optical_flow_feature_error" is as described above (error in the flow for this feature).
		 * "optical_flow_termination_criteria" is as described above (how long the algorithm should look).
		 * "0" means disable enhancements.  (For example, the second array isn't pre-initialized with guesses.)
		 */
		cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2, frame1_features, frame2_features, number_of_features, optical_flow_window, 5, optical_flow_found_feature, optical_flow_feature_error, optical_flow_termination_criteria, 0 );
		
		/* For fun (and debugging :)), let's draw the flow field. */
		for(int i = 0; i < number_of_features; i++)
		{
			/* If Pyramidal Lucas Kanade didn't really find the feature, skip it. */
			if ( optical_flow_found_feature[i] == 0 )	continue;

			int line_thickness;				line_thickness = 1;
			/* CV_RGB(red, green, blue) is the red, green, and blue components
			 * of the color you want, each out of 255.
			 */	
			CvScalar line_color;			line_color = CV_RGB(255,0,250);
	
			/* Let's make the flow field look nice with arrows. */

			/* The arrows will be a bit too short for a nice visualization because of the high framerate
			 * (ie: there's not much motion between the frames).  So let's lengthen them by a factor of 3.
			 */
			CvPoint p,q;
			p.x = (int) frame1_features[i].x;
			p.y = (int) frame1_features[i].y;
			q.x = (int) frame2_features[i].x;
			q.y = (int) frame2_features[i].y;

			double angle;		angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
			double hypotenuse;	hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

			/* Here we lengthen the arrow by a factor of three. */
			q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
			q.y = (int) (p.y - 3 * hypotenuse * sin(angle));

			/* Now we draw the main line of the arrow. */
			/* "frame1" is the frame to draw on.
			 * "p" is the point where the line begins.
			 * "q" is the point where the line stops.
			 * "CV_AA" means antialiased drawing.
			 * "0" means no fractional bits in the center cooridinate or radius.
			 */
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			/* Now draw the tips of the arrow.  I do some scaling so that the
			 * tips look proportional to the main line of the arrow.
			 */			
			p.x = (int) (q.x + 9 * cos(angle + pi / 4));
			p.y = (int) (q.y + 9 * sin(angle + pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
			p.x = (int) (q.x + 9 * cos(angle - pi / 4));
			p.y = (int) (q.y + 9 * sin(angle - pi / 4));
			cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
		}
		/* Now display the image we drew on.  Recall that "Optical Flow" is the name of
		 * the window we created above.
		 */
		cvShowImage("Optical Flow", frame1);
		/* And wait for the user to press a key (so the user has time to look at the image).
		 * If the argument is 0 then it waits forever otherwise it waits that number of milliseconds.
		 * The return value is the key the user pressed.
		 */
		int key_pressed;
		key_pressed = cvWaitKey(1);  //0

		/* If the users pushes "b" or "B" go back one frame.
		 * Otherwise go forward one frame.
		 */
		if (key_pressed == 'b' || key_pressed == 'B')	current_frame--;
		else											current_frame++;
		/* Don't run past the front/end of the AVI. */
		if (current_frame < 0)						current_frame = 0;
		if (current_frame >= number_of_frames - 1)	current_frame = number_of_frames - 2;
		if (key_pressed == 27) break;
	}

	return 0;
}
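The arrow drawing in the loop above packs several steps together: the flow vector is lengthened for visibility, then two head strokes are placed at +/- 45 degrees around the shaft direction. A standalone restatement (the helper name and the scale parameter are illustrative):

#include <math.h>
#include <opencv/cv.h>

static void draw_flow_arrow(IplImage *img, CvPoint p, CvPoint q,
                            CvScalar color, double scale)
{
    double angle = atan2((double)(p.y - q.y), (double)(p.x - q.x));
    q.x = (int)(p.x - scale * (p.x - q.x));        /* lengthen the shaft by 'scale' */
    q.y = (int)(p.y - scale * (p.y - q.y));
    cvLine(img, p, q, color, 1, CV_AA, 0);

    CvPoint h;                                     /* two head strokes at +/- 45 deg */
    h.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
    h.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
    cvLine(img, h, q, color, 1, CV_AA, 0);
    h.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
    h.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
    cvLine(img, h, q, color, 1, CV_AA, 0);
}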
Example #28
0
void processImagePair(const char *file1, const char *file2, CvVideoWriter *out, struct CvMat *currentOrientation) {
  // Load two images and allocate other structures
	IplImage* imgA = cvLoadImage(file1, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(file2, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgBcolor = cvLoadImage(file2);
 
	CvSize img_sz = cvGetSize( imgA );
	int win_size = 15;
  
	// Get the features for tracking
	IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
	IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
 
	int corner_count = MAX_CORNERS;
	CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
 
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
		0.05, 3.0, 0, 3, 0, 0.04 );
 
  fprintf(stderr, "%s: Corner count = %d\n", file1, corner_count);
 
	cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 50, 0.03 ) );
 
	// Call Lucas Kanade algorithm
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];
 
	CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
 
	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
 
	CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
 
  calcNecessaryImageRotation(imgA);
 
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, 
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
 
   CvMat *transform = cvCreateMat(3,3, CV_32FC1);
   CvMat *invTransform = cvCreateMat(3,3, CV_32FC1);
	// Find a homography based on the gradient
   CvMat cornersAMat = cvMat(1, corner_count, CV_32FC2, cornersA);
   CvMat cornersBMat = cvMat(1, corner_count, CV_32FC2, cornersB);
   cvFindHomography(&cornersAMat, &cornersBMat, transform, CV_RANSAC, 15, NULL);

   cvInvert(transform, invTransform);
   cvMatMul(currentOrientation, invTransform, currentOrientation);
   // save the translated image
 	 IplImage* trans_image = cvCloneImage(imgBcolor);
   cvWarpPerspective(imgBcolor, trans_image, currentOrientation, CV_INTER_CUBIC+CV_WARP_FILL_OUTLIERS);

   printf("%s:\n", file1);
   PrintMat(currentOrientation);

  // cvSaveImage(out, trans_image);
  cvWriteFrame(out, trans_image);

  cvReleaseImage(&eig_image);
  cvReleaseImage(&tmp_image);  
  cvReleaseImage(&trans_image);
  cvReleaseImage(&imgA);
  cvReleaseImage(&imgB);
  cvReleaseImage(&imgBcolor);
  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  
  cvReleaseMat(&transform);
  cvReleaseMat(&invTransform);
  delete [] cornersA;
  delete [] cornersB;
}
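The key step in this example is the accumulation of currentOrientation: each frame-to-frame homography is inverted and folded into a running product so that every frame can be warped back into the first frame's coordinates. A sketch of that update, using a temporary matrix to avoid multiplying in place (an assumption about cvGEMM aliasing; names are illustrative):

#include <opencv/cv.h>

/* H_total = H_total * inv(H_step), where H_step maps frame A to frame B. */
static void accumulate_orientation(CvMat *H_total, const CvMat *H_step)
{
    CvMat *H_inv = cvCreateMat(3, 3, CV_32FC1);
    CvMat *tmp   = cvCreateMat(3, 3, CV_32FC1);
    cvInvert(H_step, H_inv, CV_LU);
    cvMatMul(H_total, H_inv, tmp);     /* tmp = H_total * H_inv */
    cvCopy(tmp, H_total, NULL);
    cvReleaseMat(&H_inv);
    cvReleaseMat(&tmp);
}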
Example #29
0
int main()
{
	const int MAX_CORNERS = 400;
	CvCapture* cap = NULL;
	IplImage *frame = NULL, *frameGrey = NULL, *greyProc[2] = {NULL, NULL};
	IplImage *tmp = NULL, *eig = NULL;
	IplImage *pryProc[2] = {NULL, NULL};
	CvPoint2D32f corners[2][MAX_CORNERS];
	char  statusVector[MAX_CORNERS];
	float errorVector[MAX_CORNERS];
	int dblBuff = 0;
	int isReady = 0;

	cap = cvCreateCameraCapture(0);	
	assert(cap);

	// set capture size
	cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH,  640);
	cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, 480);

	IMU_FD = open("/dev/i2c-1", O_RDWR);
	
	// icInit();
	// pthread_create(&IMU_THREAD, NULL, imuHandler, NULL);

	cvNamedWindow("AVC", CV_WINDOW_AUTOSIZE); //resizable window;
	int cornerCount = MAX_CORNERS;

	while(1){
		IplImage* currFrame = cvQueryFrame(cap);
		if(!currFrame) continue;
		if(!frame){
			frame = cvCloneImage(currFrame);
			CvSize frameSize = cvSize(frame->width, frame->height);
			frameGrey    = cvCreateImage(frameSize, 8, 1);
			greyProc[0]  = cvCreateImage(frameSize, 8, 1);
			greyProc[1]  = cvCloneImage(greyProc[0]);
			pryProc[0]   = cvCloneImage(greyProc[0]);
			pryProc[1]   = cvCloneImage(greyProc[0]);

			tmp = cvCreateImage(frameSize, IPL_DEPTH_32F, 1);
			eig = cvCreateImage(frameSize, IPL_DEPTH_32F, 1);
		}
		
		cvCopy(currFrame, frame, 0);
		
		// convert the frame to black and white
		cvCvtColor(frame, greyProc[dblBuff], CV_BGR2GRAY);
		// cvPyrDown(frameGrey, greyProc[dblBuff], CV_GAUSSIAN_5x5);

		if(isReady)
		cvCalcOpticalFlowPyrLK(
			greyProc[!dblBuff],
			greyProc[dblBuff],
			pryProc[!dblBuff],
			pryProc[dblBuff],
			corners[!dblBuff],
			corners[dblBuff],
			cornerCount,
			cvSize(3, 3),
			5,                 // pyr level (0 not used)
			statusVector,      // list of bools which inicate feature matches
			errorVector,
			cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.01),
			0                  // don't set CV_LKFLOW_PYR_A_READY: pryProc[!dblBuff] is only valid after a prior call
		);

		for(int i = cornerCount; i--;){
			cvCircle(
				frame,
				cvPoint(corners[dblBuff][i].x, corners[dblBuff][i].y),
				3,
				cvScalar(255, 0, 0, 255),
				1,
				8,
				0
			);
			
			if(!statusVector[i]) continue;

			float dx = (corners[dblBuff][i].x - corners[!dblBuff][i].x) * 10;
			float dy = (corners[dblBuff][i].y - corners[!dblBuff][i].y) * 10;
			float depth = dx * dx + dy * dy;

			depth = pow(depth, 64);

			cvLine(
				frame,
				cvPoint(corners[dblBuff][i].x, corners[dblBuff][i].y),
				cvPoint((corners[dblBuff][i].x + dx) , (corners[dblBuff][i].y + dy)),
				cvScalar(depth, dy + 128, dx + 128, 255 / (errorVector[i] + 1)),
				1, 
				8,
				0
			);
		}

		cvShowImage("AVC", frame);

		// detect corners
		cornerCount = MAX_CORNERS;
		cvGoodFeaturesToTrack(
			greyProc[dblBuff],
			NULL,
			NULL,
			corners[dblBuff],
			&cornerCount,
			0.01,        // quality
			0.01,        // min distance
			NULL,        // mask for ROI
			5,           // block size
			0,           // use harris detector
			0.04         // not used (free param of harris)
		);

		dblBuff = !dblBuff;
		isReady = 1;
	}

	cvReleaseImage(&frame);
	cvReleaseCapture(&cap);
	return 0;
}
Example #30
0
int main(int argc, const char * argv[]) {
    
    IplImage* imgA = cvLoadImage( "data/OpticalFlow0.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    IplImage* imgB = cvLoadImage( "data/OpticalFlow1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    
    CvSize img_sz = cvGetSize( imgA );
    int win_size = 10;
    
    IplImage* imgC = cvLoadImage( "data/OpticalFlow1.jpg", CV_LOAD_IMAGE_UNCHANGED );
    
    IplImage* image_eig = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    IplImage* image_tmp = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
    
    int corner_count = MAX_CORNERS;
    CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
    CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
    
    cvGoodFeaturesToTrack( imgA, image_eig, image_tmp, cornersA, &corner_count, 0.01, 5.0, 0, 3, 0, 0.04 );
    
    cvFindCornerSubPix(
                       imgA,
                       cornersA,
                       corner_count,
                       cvSize(win_size, win_size),
                       cvSize(-1, -1),
                       cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20.0, 0.03)
                       );
    
    char features_found[ MAX_CORNERS ];
    float feature_errors[ MAX_CORNERS ];
    
    CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
    
    IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
    IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
    
    cvCalcOpticalFlowPyrLK(imgA,
                           imgB,
                           pyrA,
                           pyrB,
                           cornersA,
                           cornersB,
                           corner_count,
                           cvSize(win_size, win_size),
                           5,
                           features_found,
                           feature_errors,
                           cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20.0, 0.3),
                           0
                           );
    
    for( int i=0; i<corner_count; i++ ) {
        if( features_found[i]==0|| feature_errors[i]>550 ) {
            printf("Error is %f\n",feature_errors[i]);
            continue;
        }
        CvPoint p0 = cvPoint(
                             cvRound( cornersA[i].x ),
                             cvRound( cornersA[i].y )
                             );
        CvPoint p1 = cvPoint(
                             cvRound( cornersB[i].x ),
                             cvRound( cornersB[i].y )
                             );
        
        cvLine( imgC, p0, p1, CV_RGB(255,0,0),2 );
    }
    cvNamedWindow("ImageA",0);
    cvNamedWindow("ImageB",0);
    cvNamedWindow("LKpyr_OpticalFlow",0);
    cvShowImage("ImageA",imgA);
    cvShowImage("ImageB",imgB);
    cvShowImage("LKpyr_OpticalFlow",imgC);
    cvWaitKey(0);
    
    return 0;

}